author     Andrew Kelley <andrew@ziglang.org>  2023-02-19 10:20:19 -0700
committer  Andrew Kelley <andrew@ziglang.org>  2023-02-19 10:20:19 -0700
commit     b5b634e4e8a2a1fe32fba50ccd175257b4213936 (patch)
tree       4eed625db81dd4d6907e386084df36ec3ac85687 /src
parent     efdc94c10712f610e7de5e49fd9cd6f88b4bbbae (diff)
parent     02f5d2673f1bb21e7329acdd664fed565ecd4317 (diff)
Merge remote-tracking branch 'origin/master' into llvm16
Diffstat (limited to 'src')
-rw-r--r--  src/AstGen.zig                        285
-rw-r--r--  src/Autodoc.zig                        16
-rw-r--r--  src/Compilation.zig                    22
-rw-r--r--  src/Liveness.zig                        8
-rw-r--r--  src/Manifest.zig                        2
-rw-r--r--  src/Module.zig                         79
-rw-r--r--  src/Package.zig                         2
-rw-r--r--  src/RangeSet.zig                        2
-rw-r--r--  src/Sema.zig                          509
-rw-r--r--  src/Zir.zig                            27
-rw-r--r--  src/arch/aarch64/CodeGen.zig           26
-rw-r--r--  src/arch/aarch64/Emit.zig               6
-rw-r--r--  src/arch/arm/CodeGen.zig               26
-rw-r--r--  src/arch/arm/Emit.zig                   6
-rw-r--r--  src/arch/arm/bits.zig                   4
-rw-r--r--  src/arch/riscv64/CodeGen.zig            4
-rw-r--r--  src/arch/riscv64/Emit.zig               2
-rw-r--r--  src/arch/sparc64/CodeGen.zig            8
-rw-r--r--  src/arch/sparc64/Emit.zig               6
-rw-r--r--  src/arch/wasm/CodeGen.zig              18
-rw-r--r--  src/arch/wasm/Emit.zig                  2
-rw-r--r--  src/arch/x86_64/CodeGen.zig            20
-rw-r--r--  src/arch/x86_64/Emit.zig                4
-rw-r--r--  src/arch/x86_64/Mir.zig                 2
-rw-r--r--  src/arch/x86_64/abi.zig                10
-rw-r--r--  src/codegen.zig                         4
-rw-r--r--  src/codegen/c.zig                      52
-rw-r--r--  src/codegen/llvm.zig                   60
-rw-r--r--  src/codegen/spirv.zig                   4
-rw-r--r--  src/codegen/spirv/Assembler.zig         2
-rw-r--r--  src/codegen/spirv/Module.zig            4
-rw-r--r--  src/codegen/spirv/Section.zig           2
-rw-r--r--  src/codegen/spirv/type.zig              2
-rw-r--r--  src/glibc.zig                           6
-rw-r--r--  src/libc_installation.zig               6
-rw-r--r--  src/libunwind.zig                       2
-rw-r--r--  src/link/Coff.zig                       6
-rw-r--r--  src/link/Dwarf.zig                     10
-rw-r--r--  src/link/Elf.zig                       18
-rw-r--r--  src/link/MachO.zig                     36
-rw-r--r--  src/link/MachO/DebugSymbols.zig         8
-rw-r--r--  src/link/MachO/Dylib.zig                2
-rw-r--r--  src/link/MachO/Object.zig              14
-rw-r--r--  src/link/MachO/UnwindInfo.zig          16
-rw-r--r--  src/link/MachO/dead_strip.zig           2
-rw-r--r--  src/link/MachO/dyld_info/Rebase.zig     2
-rw-r--r--  src/link/MachO/dyld_info/bind.zig       2
-rw-r--r--  src/link/MachO/eh_frame.zig             8
-rw-r--r--  src/link/MachO/thunks.zig               2
-rw-r--r--  src/link/MachO/zld.zig                 62
-rw-r--r--  src/link/SpirV.zig                      4
-rw-r--r--  src/link/Wasm.zig                      20
-rw-r--r--  src/link/Wasm/Object.zig                4
-rw-r--r--  src/link/tapi.zig                       4
-rw-r--r--  src/link/tapi/yaml.zig                 14
-rw-r--r--  src/main.zig                            6
-rw-r--r--  src/mingw.zig                           4
-rw-r--r--  src/objcopy.zig                         2
-rw-r--r--  src/print_air.zig                      16
-rw-r--r--  src/print_targets.zig                   4
-rw-r--r--  src/print_zir.zig                      30
-rw-r--r--  src/register_manager.zig                6
-rw-r--r--  src/test.zig                           10
-rw-r--r--  src/translate_c.zig                    14
-rw-r--r--  src/translate_c/ast.zig                22
-rw-r--r--  src/type.zig                           77
-rw-r--r--  src/value.zig                         131
67 files changed, 1108 insertions(+), 688 deletions(-)
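
Most of the churn merged in from master is the mechanical migration to the multi-object for-loop syntax introduced for Zig 0.11: capturing an index now requires an explicit `0..` counter operand instead of an implicit second capture. A minimal before/after sketch, assuming a hypothetical `items` slice:

    // Old syntax (pre-0.11): the index capture was implicit.
    for (items) |item, i| {
        _ = item;
        _ = i;
    }

    // New syntax: the counter is an explicit operand.
    for (items, 0..) |item, i| {
        _ = item;
        _ = i;
    }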
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 40eef32d4e..de259521bc 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -518,6 +518,7 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins
.error_union,
.merge_error_sets,
.switch_range,
+ .for_range,
.@"await",
.bit_not,
.negation,
@@ -646,6 +647,8 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.asm_output => unreachable, // Handled in `asmExpr`.
.asm_input => unreachable, // Handled in `asmExpr`.
+ .for_range => unreachable, // Handled in `forExpr`.
+
.assign => {
try assign(gz, scope, node);
return rvalue(gz, ri, .void_value, node);
@@ -834,7 +837,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.@"while",
=> return whileExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false),
- .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false),
+ .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullFor(node).?, false),
.slice_open => {
const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs);
@@ -1502,7 +1505,7 @@ fn arrayInitExprInner(
extra_index += 1;
}
- for (elements) |elem_init, i| {
+ for (elements, 0..) |elem_init, i| {
const ri = if (elem_ty != .none)
ResultInfo{ .rl = .{ .coerced_ty = elem_ty } }
else if (array_ty_inst != .none and nodeMayNeedMemoryLocation(astgen.tree, elem_init, true)) ri: {
@@ -1559,7 +1562,7 @@ fn arrayInitExprRlPtrInner(
});
var extra_index = try reserveExtra(astgen, elements.len);
- for (elements) |elem_init, i| {
+ for (elements, 0..) |elem_init, i| {
const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
.ptr = result_ptr,
.index = @intCast(u32, i),
@@ -2342,7 +2345,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
.@"while", => _ = try whileExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.fullWhile(inner_node).?, true),
.for_simple,
- .@"for", => _ = try forExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.fullWhile(inner_node).?, true),
+ .@"for", => _ = try forExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.fullFor(inner_node).?, true),
else => noreturn_src_node = try unusedResultExpr(gz, scope, inner_node),
// zig fmt: on
@@ -2397,6 +2400,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.param,
.param_comptime,
.param_anytype,
@@ -2595,6 +2599,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.field_base_ptr,
.ret_ptr,
.ret_type,
+ .for_len,
.@"try",
.try_ptr,
//.try_inline,
@@ -6282,7 +6287,7 @@ fn forExpr(
scope: *Scope,
ri: ResultInfo,
node: Ast.Node.Index,
- for_full: Ast.full.While,
+ for_full: Ast.full.For,
is_statement: bool,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
@@ -6291,22 +6296,41 @@ fn forExpr(
try astgen.checkLabelRedefinition(scope, label_token);
}
- // Set up variables and constants.
const is_inline = parent_gz.force_comptime or for_full.inline_token != null;
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
+ const node_tags = tree.nodes.items(.tag);
+ const node_data = tree.nodes.items(.data);
+ const gpa = astgen.gpa;
- const payload_is_ref = if (for_full.payload_token) |payload_token|
- token_tags[payload_token] == .asterisk
- else
- false;
-
- try emitDbgNode(parent_gz, for_full.ast.cond_expr);
+ // TODO this can be deleted after zig 0.11.0 is released because it
+ // will be caught in the parser.
+ if (for_full.isOldSyntax(token_tags)) {
+ return astgen.failTokNotes(
+ for_full.payload_token + 2,
+ "extra capture in for loop",
+ .{},
+ &[_]u32{
+ try astgen.errNoteTok(
+ for_full.payload_token + 2,
+ "run 'zig fmt' to upgrade your code automatically",
+ .{},
+ ),
+ },
+ );
+ }
- const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none };
- const array_ptr = try expr(parent_gz, scope, cond_ri, for_full.ast.cond_expr);
- const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
+ // For counters, this is the start value; for indexables, this is the base
+ // pointer that can be used with elem_ptr and similar instructions.
+ // Special value `none` means that this is a counter and its start value is
+ // zero, indicating that the main index counter can be used directly.
+ const indexables = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer gpa.free(indexables);
+ // elements of this array can be `none`, indicating no length check.
+ const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer gpa.free(lens);
+ // We will use a single zero-based counter no matter how many indexables there are.
const index_ptr = blk: {
const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
@@ -6315,22 +6339,95 @@ fn forExpr(
break :blk index_ptr;
};
+ var any_len_checks = false;
+
+ {
+ var capture_token = for_full.payload_token;
+ for (for_full.ast.inputs, 0..) |input, i_usize| {
+ const i = @intCast(u32, i_usize);
+ const capture_is_ref = token_tags[capture_token] == .asterisk;
+ const ident_tok = capture_token + @boolToInt(capture_is_ref);
+ const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_");
+
+ if (is_discard and capture_is_ref) {
+ return astgen.failTok(capture_token, "pointer modifier invalid on discard", .{});
+ }
+ // Skip over the comma, and on to the next capture (or the ending pipe character).
+ capture_token = ident_tok + 2;
+
+ try emitDbgNode(parent_gz, input);
+ if (node_tags[input] == .for_range) {
+ if (capture_is_ref) {
+ return astgen.failTok(ident_tok, "cannot capture reference to range", .{});
+ }
+ const start_node = node_data[input].lhs;
+ const start_val = try expr(parent_gz, scope, .{ .rl = .none }, start_node);
+
+ const end_node = node_data[input].rhs;
+ const end_val = if (end_node != 0)
+ try expr(parent_gz, scope, .{ .rl = .none }, node_data[input].rhs)
+ else
+ .none;
+
+ if (end_val == .none and is_discard) {
+ return astgen.failTok(ident_tok, "discard of unbounded counter", .{});
+ }
+
+ const start_is_zero = nodeIsTriviallyZero(tree, start_node);
+ const range_len = if (end_val == .none or start_is_zero)
+ end_val
+ else
+ try parent_gz.addPlNode(.sub, input, Zir.Inst.Bin{
+ .lhs = end_val,
+ .rhs = start_val,
+ });
+
+ any_len_checks = any_len_checks or range_len != .none;
+ indexables[i] = if (start_is_zero) .none else start_val;
+ lens[i] = range_len;
+ } else {
+ const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input);
+
+ any_len_checks = true;
+ indexables[i] = indexable;
+ lens[i] = indexable;
+ }
+ }
+ }
+
+ if (!any_len_checks) {
+ return astgen.failNode(node, "unbounded for loop", .{});
+ }
+
+ // We use a dedicated ZIR instruction to assert the lengths to assist with
+ // nicer error reporting as well as fewer ZIR bytes emitted.
+ const len: Zir.Inst.Ref = len: {
+ const lens_len = @intCast(u32, lens.len);
+ try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len);
+ const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{
+ .operands_len = lens_len,
+ });
+ appendRefsAssumeCapacity(astgen, lens);
+ break :len len;
+ };
+
const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop;
const loop_block = try parent_gz.makeBlockInst(loop_tag, node);
- try parent_gz.instructions.append(astgen.gpa, loop_block);
+ try parent_gz.instructions.append(gpa, loop_block);
var loop_scope = parent_gz.makeSubBlock(scope);
loop_scope.is_inline = is_inline;
loop_scope.setBreakResultInfo(ri);
defer loop_scope.unstack();
- defer loop_scope.labeled_breaks.deinit(astgen.gpa);
+ defer loop_scope.labeled_breaks.deinit(gpa);
+
+ const index = try loop_scope.addUnNode(.load, index_ptr, node);
var cond_scope = parent_gz.makeSubBlock(&loop_scope.base);
defer cond_scope.unstack();
- // check condition i < array_expr.len
- const index = try cond_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr);
- const cond = try cond_scope.addPlNode(.cmp_lt, for_full.ast.cond_expr, Zir.Inst.Bin{
+ // Check the condition.
+ const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{
.lhs = index,
.rhs = len,
});
@@ -6341,12 +6438,11 @@ fn forExpr(
const cond_block = try loop_scope.makeBlockInst(block_tag, node);
try cond_scope.setBlockBody(cond_block);
// cond_block unstacked now, can add new instructions to loop_scope
- try loop_scope.instructions.append(astgen.gpa, cond_block);
+ try loop_scope.instructions.append(gpa, cond_block);
// Increment the index variable.
- const index_2 = try loop_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr);
- const index_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
- .lhs = index_2,
+ const index_plus_one = try loop_scope.addPlNode(.add_unsafe, node, Zir.Inst.Bin{
+ .lhs = index,
.rhs = .one_usize,
});
_ = try loop_scope.addBin(.store, index_ptr, index_plus_one);
@@ -6367,62 +6463,67 @@ fn forExpr(
defer then_scope.unstack();
try then_scope.addDbgBlockBegin();
- var payload_val_scope: Scope.LocalVal = undefined;
- var index_scope: Scope.LocalPtr = undefined;
+
+ const capture_scopes = try gpa.alloc(Scope.LocalVal, for_full.ast.inputs.len);
+ defer gpa.free(capture_scopes);
+
const then_sub_scope = blk: {
- const payload_token = for_full.payload_token.?;
- const ident = if (token_tags[payload_token] == .asterisk)
- payload_token + 1
- else
- payload_token;
- const is_ptr = ident != payload_token;
- const value_name = tree.tokenSlice(ident);
- var payload_sub_scope: *Scope = undefined;
- if (!mem.eql(u8, value_name, "_")) {
- const name_str_index = try astgen.identAsString(ident);
- const tag: Zir.Inst.Tag = if (is_ptr) .elem_ptr else .elem_val;
- const payload_inst = try then_scope.addPlNode(tag, for_full.ast.cond_expr, Zir.Inst.Bin{
- .lhs = array_ptr,
- .rhs = index,
- });
- try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident, value_name, .capture);
- payload_val_scope = .{
- .parent = &then_scope.base,
+ var capture_token = for_full.payload_token;
+ var capture_sub_scope: *Scope = &then_scope.base;
+ for (for_full.ast.inputs, 0..) |input, i_usize| {
+ const i = @intCast(u32, i_usize);
+ const capture_is_ref = token_tags[capture_token] == .asterisk;
+ const ident_tok = capture_token + @boolToInt(capture_is_ref);
+ const capture_name = tree.tokenSlice(ident_tok);
+ // Skip over the comma, and on to the next capture (or the ending pipe character).
+ capture_token = ident_tok + 2;
+
+ if (mem.eql(u8, capture_name, "_")) continue;
+
+ const name_str_index = try astgen.identAsString(ident_tok);
+ try astgen.detectLocalShadowing(capture_sub_scope, name_str_index, ident_tok, capture_name, .capture);
+
+ const capture_inst = inst: {
+ const is_counter = node_tags[input] == .for_range;
+
+ if (indexables[i] == .none) {
+ // Special case: the main index can be used directly.
+ assert(is_counter);
+ assert(!capture_is_ref);
+ break :inst index;
+ }
+
+ // For counters, we add the index variable to the start value; for
+ // indexables, we use it as an element index. This is so similar
+ // that they can share the same code paths, branching only on the
+ // ZIR tag.
+ const switch_cond = (@as(u2, @boolToInt(capture_is_ref)) << 1) | @boolToInt(is_counter);
+ const tag: Zir.Inst.Tag = switch (switch_cond) {
+ 0b00 => .elem_val,
+ 0b01 => .add,
+ 0b10 => .elem_ptr,
+ 0b11 => unreachable, // compile error emitted already
+ };
+ break :inst try then_scope.addPlNode(tag, input, Zir.Inst.Bin{
+ .lhs = indexables[i],
+ .rhs = index,
+ });
+ };
+
+ capture_scopes[i] = .{
+ .parent = capture_sub_scope,
.gen_zir = &then_scope,
.name = name_str_index,
- .inst = payload_inst,
- .token_src = ident,
+ .inst = capture_inst,
+ .token_src = ident_tok,
.id_cat = .capture,
};
- try then_scope.addDbgVar(.dbg_var_val, name_str_index, payload_inst);
- payload_sub_scope = &payload_val_scope.base;
- } else if (is_ptr) {
- return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{});
- } else {
- payload_sub_scope = &then_scope.base;
+
+ try then_scope.addDbgVar(.dbg_var_val, name_str_index, capture_inst);
+ capture_sub_scope = &capture_scopes[i].base;
}
- const index_token = if (token_tags[ident + 1] == .comma)
- ident + 2
- else
- break :blk payload_sub_scope;
- const token_bytes = tree.tokenSlice(index_token);
- if (mem.eql(u8, token_bytes, "_")) {
- return astgen.failTok(index_token, "discard of index capture; omit it instead", .{});
- }
- const index_name = try astgen.identAsString(index_token);
- try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token, token_bytes, .@"loop index capture");
- index_scope = .{
- .parent = payload_sub_scope,
- .gen_zir = &then_scope,
- .name = index_name,
- .ptr = index_ptr,
- .token_src = index_token,
- .maybe_comptime = is_inline,
- .id_cat = .@"loop index capture",
- };
- try then_scope.addDbgVar(.dbg_var_val, index_name, index_ptr);
- break :blk &index_scope.base;
+ break :blk capture_sub_scope;
};
const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, for_full.ast.then_expr);
@@ -6879,7 +6980,7 @@ fn switchExpr(
zir_datas[switch_block].pl_node.payload_index = payload_index;
const strat = ri.rl.strategy(&block_scope);
- for (payloads.items[case_table_start..case_table_end]) |start_index, i| {
+ for (payloads.items[case_table_start..case_table_end], 0..) |start_index, i| {
var body_len_index = start_index;
var end_index = start_index;
const table_index = case_table_start + i;
@@ -7543,7 +7644,7 @@ fn asmExpr(
var output_type_bits: u32 = 0;
- for (full.outputs) |output_node, i| {
+ for (full.outputs, 0..) |output_node, i| {
const symbolic_name = main_tokens[output_node];
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
@@ -7580,7 +7681,7 @@ fn asmExpr(
var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined;
const inputs = inputs_buffer[0..full.inputs.len];
- for (full.inputs) |input_node, i| {
+ for (full.inputs, 0..) |input_node, i| {
const symbolic_name = main_tokens[input_node];
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
@@ -7753,7 +7854,7 @@ fn typeOf(
var typeof_scope = gz.makeSubBlock(scope);
typeof_scope.force_comptime = false;
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node);
astgen.extra.items[args_index + i] = @enumToInt(param_ref);
}
@@ -8901,6 +9002,25 @@ comptime {
}
}
+fn nodeIsTriviallyZero(tree: *const Ast, node: Ast.Node.Index) bool {
+ const node_tags = tree.nodes.items(.tag);
+ const main_tokens = tree.nodes.items(.main_token);
+
+ switch (node_tags[node]) {
+ .number_literal => {
+ const ident = main_tokens[node];
+ return switch (std.zig.parseNumberLiteral(tree.tokenSlice(ident))) {
+ .int => |number| switch (number) {
+ 0 => true,
+ else => false,
+ },
+ else => false,
+ };
+ },
+ else => return false,
+ }
+}
+
fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_res_ty: bool) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
@@ -9021,6 +9141,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.field_access,
.sub,
.sub_wrap,
@@ -9310,6 +9431,7 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.Ev
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.sub,
.sub_wrap,
.sub_sat,
@@ -9487,6 +9609,7 @@ fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.In
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.field_access,
.sub,
.sub_wrap,
@@ -9731,6 +9854,7 @@ fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.field_access,
.sub,
.sub_wrap,
@@ -10491,14 +10615,16 @@ fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice {
var tok_i = start;
{
const slice = tree.tokenSlice(tok_i);
- const line_bytes = slice[2 .. slice.len - 1];
+ const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1;
+ const line_bytes = slice[2 .. slice.len - carriage_return_ending];
try string_bytes.appendSlice(gpa, line_bytes);
tok_i += 1;
}
// Following lines: each line prepends a newline.
while (tok_i <= end) : (tok_i += 1) {
const slice = tree.tokenSlice(tok_i);
- const line_bytes = slice[2 .. slice.len - 1];
+ const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1;
+ const line_bytes = slice[2 .. slice.len - carriage_return_ending];
try string_bytes.ensureUnusedCapacity(gpa, line_bytes.len + 1);
string_bytes.appendAssumeCapacity('\n');
string_bytes.appendSliceAssumeCapacity(line_bytes);
@@ -10577,7 +10703,6 @@ const Scope = struct {
@"function parameter",
@"local constant",
@"local variable",
- @"loop index capture",
@"switch tag capture",
capture,
};
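
The rewritten forExpr above lowers any number of inputs (indexables and `start..end` ranges) against one shared zero-based counter; a literal `0` start lets the counter be used directly (nodeIsTriviallyZero), and the old implicit index capture is rejected with "extra capture in for loop" plus a note to run 'zig fmt'. A sketch of the user-facing rules, assuming hypothetical arrays `a` and `b` (the error strings are quoted from the code above):

    const a = [_]u8{ 1, 2, 3 };
    var b = [_]u32{ 0, 0, 0 };
    // Multiple inputs share one counter; a `10..` range adds its start
    // value on top of that counter (the `.add` path above).
    for (a, &b, 10..) |x, *y, n| {
        y.* = @intCast(u32, n) + x;
    }
    // for (10..) |n| {}        // error: unbounded for loop
    // for (a, 0..) |x, *n| {}  // error: cannot capture reference to range
    // for (a, 0..) |x, _| {}   // error: discard of unbounded counter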
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 44325e3836..2fc54cc0ec 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1647,7 +1647,7 @@ fn walkInstruction(
std.debug.assert(operands.len > 0);
var array_type = try self.walkRef(file, parent_scope, parent_src, operands[0], false);
- for (operands[1..]) |op, idx| {
+ for (operands[1..], 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1665,7 +1665,7 @@ fn walkInstruction(
const operands = file.zir.refSlice(extra.end, extra.data.operands_len);
const array_data = try self.arena.alloc(usize, operands.len);
- for (operands) |op, idx| {
+ for (operands, 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1686,7 +1686,7 @@ fn walkInstruction(
std.debug.assert(operands.len > 0);
var array_type = try self.walkRef(file, parent_scope, parent_src, operands[0], false);
- for (operands[1..]) |op, idx| {
+ for (operands[1..], 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1715,7 +1715,7 @@ fn walkInstruction(
const operands = file.zir.refSlice(extra.end, extra.data.operands_len);
const array_data = try self.arena.alloc(usize, operands.len);
- for (operands) |op, idx| {
+ for (operands, 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -2386,7 +2386,7 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, args.len);
var array_type: ?DocData.Expr = null;
- for (args) |arg, idx| {
+ for (args, 0..) |arg, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, arg, idx == 0);
if (idx == 0) {
array_type = wr.typeRef;
@@ -3470,7 +3470,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_enum.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_enum.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
@@ -3517,7 +3517,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_union.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_union.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
@@ -3564,7 +3564,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_struct.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_struct.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 44201ab834..60e74107c2 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -641,7 +641,7 @@ pub const AllErrors = struct {
}
const reference_trace = try allocator.alloc(Message, module_err_msg.reference_trace.len);
- for (reference_trace) |*reference, i| {
+ for (reference_trace, 0..) |*reference, i| {
const module_reference = module_err_msg.reference_trace[i];
if (module_reference.hidden != 0) {
reference.* = .{ .plain = .{ .msg = undefined, .count = module_reference.hidden } };
@@ -714,7 +714,7 @@ pub const AllErrors = struct {
const block = file.zir.extraData(Zir.Inst.Block, item.data.notes);
const body = file.zir.extra[block.end..][0..block.data.body_len];
notes = try arena.alloc(Message, body.len);
- for (notes) |*note, i| {
+ for (notes, 0..) |*note, i| {
const note_item = file.zir.extraData(Zir.Inst.CompileErrors.Item, body[i]);
const msg = file.zir.nullTerminatedString(note_item.data.msg);
const span = blk: {
@@ -786,7 +786,7 @@ pub const AllErrors = struct {
fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
const duped_list = try arena.alloc(Message, list.len);
- for (list) |item, i| {
+ for (list, 0..) |item, i| {
duped_list[i] = switch (item) {
.src => |src| .{ .src = .{
.msg = try arena.dupe(u8, src.msg),
@@ -1441,7 +1441,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: {
var buf = std.ArrayList(u8).init(arena);
- for (options.target.cpu.arch.allFeaturesList()) |feature, index_usize| {
+ for (options.target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = options.target.cpu.features.isEnabled(index);
@@ -1818,7 +1818,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
var system_libs: std.StringArrayHashMapUnmanaged(SystemLib) = .{};
errdefer system_libs.deinit(gpa);
try system_libs.ensureTotalCapacity(gpa, options.system_lib_names.len);
- for (options.system_lib_names) |lib_name, i| {
+ for (options.system_lib_names, 0..) |lib_name, i| {
system_libs.putAssumeCapacity(lib_name, options.system_lib_infos[i]);
}
@@ -2880,7 +2880,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
}
for (self.lld_errors.items) |lld_error| {
const notes = try arena_allocator.alloc(AllErrors.Message, lld_error.context_lines.len);
- for (lld_error.context_lines) |context_line, i| {
+ for (lld_error.context_lines, 0..) |context_line, i| {
notes[i] = .{ .plain = .{
.msg = try arena_allocator.dupe(u8, context_line),
} };
@@ -3007,7 +3007,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
};
defer self.gpa.free(err_msg.notes);
- for (keys[1..]) |key, i| {
+ for (keys[1..], 0..) |key, i| {
const note_decl = module.declPtr(key);
err_msg.notes[i] = .{
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]),
@@ -3104,7 +3104,7 @@ pub fn performAllTheWork(
const notes = try mod.gpa.alloc(Module.ErrorMsg, file.references.items.len);
errdefer mod.gpa.free(notes);
- for (notes) |*note, i| {
+ for (notes, 0..) |*note, i| {
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
note.* = switch (file.references.items[i]) {
.import => |loc| try Module.ErrorMsg.init(
@@ -3740,7 +3740,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
new_argv_with_sentinel[argv.items.len] = null;
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
- for (argv.items) |arg, i| {
+ for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
@@ -4382,7 +4382,7 @@ pub fn addCCArgs(
// It would be really nice if there was a more compact way to communicate this info to Clang.
const all_features_list = target.cpu.arch.allFeaturesList();
try argv.ensureUnusedCapacity(all_features_list.len * 4);
- for (all_features_list) |feature, index_usize| {
+ for (all_features_list, 0..) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
@@ -5210,7 +5210,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
std.zig.fmtId(generic_arch_name),
});
- for (target.cpu.arch.allFeaturesList()) |feature, index_usize| {
+ for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
if (is_enabled) {
diff --git a/src/Liveness.zig b/src/Liveness.zig
index e775883b1f..481cf25d04 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -384,7 +384,7 @@ pub fn categorizeOperand(
const args = @ptrCast([]const Air.Inst.Ref, air.extra[extra.end..][0..extra.data.args_len]);
if (args.len + 1 <= bpi - 1) {
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i + 1), .write);
}
return .write;
@@ -436,7 +436,7 @@ pub fn categorizeOperand(
const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i), .none);
}
return .none;
@@ -1272,12 +1272,12 @@ fn analyzeInst(
defer for (case_deaths) |*cd| cd.deinit(gpa);
var total_deaths: u32 = 0;
- for (case_tables) |*ct, i| {
+ for (case_tables, 0..) |*ct, i| {
total_deaths += ct.count();
var it = ct.keyIterator();
while (it.next()) |key| {
const case_death = key.*;
- for (case_tables) |*ct_inner, j| {
+ for (case_tables, 0..) |*ct_inner, j| {
if (i == j) continue;
if (!ct_inner.contains(case_death)) {
// instruction is not referenced in this case
diff --git a/src/Manifest.zig b/src/Manifest.zig
index c3f77aec98..068a14942f 100644
--- a/src/Manifest.zig
+++ b/src/Manifest.zig
@@ -123,7 +123,7 @@ pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
result[2] = hex_charset[Hash.digest_length >> 4];
result[3] = hex_charset[Hash.digest_length & 15];
- for (digest) |byte, i| {
+ for (digest, 0..) |byte, i| {
result[4 + i * 2] = hex_charset[byte >> 4];
result[5 + i * 2] = hex_charset[byte & 15];
}
diff --git a/src/Module.zig b/src/Module.zig
index a129cb0cb6..76777532ab 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -268,7 +268,7 @@ pub const MemoizedCall = struct {
if (a.func != b.func) return false;
assert(a.args.len == b.args.len);
- for (a.args) |a_arg, arg_i| {
+ for (a.args, 0..) |a_arg, arg_i| {
const b_arg = b.args[arg_i];
if (!a_arg.eql(b_arg, ctx.module)) {
return false;
@@ -1082,7 +1082,7 @@ pub const Struct = struct {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
- for (s.fields.values()) |field, i| {
+ for (s.fields.values(), 0..) |field, i| {
if (i == index) {
return @intCast(u16, bit_sum);
}
@@ -1341,7 +1341,7 @@ pub const Union = struct {
assert(u.haveFieldTypes());
var most_alignment: u32 = 0;
var most_index: usize = undefined;
- for (u.fields.values()) |field, i| {
+ for (u.fields.values(), 0..) |field, i| {
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.normalAlignment(target);
@@ -1405,7 +1405,7 @@ pub const Union = struct {
var payload_size: u64 = 0;
var payload_align: u32 = 0;
const fields = u.fields.values();
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_align = a: {
@@ -2462,6 +2462,55 @@ pub const SrcLoc = struct {
};
return nodeToSpan(tree, src_node);
},
+ .for_input => |for_input| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node = src_loc.declRelativeToNodeIndex(for_input.for_node_offset);
+ const for_full = tree.fullFor(node).?;
+ const src_node = for_full.ast.inputs[for_input.input_index];
+ return nodeToSpan(tree, src_node);
+ },
+ .for_capture_from_input => |node_off| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const token_tags = tree.tokens.items(.tag);
+ const input_node = src_loc.declRelativeToNodeIndex(node_off);
+ // We have to actually linear scan the whole AST to find the for loop
+ // that contains this input.
+ const node_tags = tree.nodes.items(.tag);
+ for (node_tags, 0..) |node_tag, node_usize| {
+ const node = @intCast(Ast.Node.Index, node_usize);
+ switch (node_tag) {
+ .for_simple, .@"for" => {
+ const for_full = tree.fullFor(node).?;
+ for (for_full.ast.inputs, 0..) |input, input_index| {
+ if (input_node == input) {
+ var count = input_index;
+ var tok = for_full.payload_token;
+ while (true) {
+ switch (token_tags[tok]) {
+ .comma => {
+ count -= 1;
+ tok += 1;
+ },
+ .identifier => {
+ if (count == 0)
+ return tokensToSpan(tree, tok, tok + 1, tok);
+ tok += 1;
+ },
+ .asterisk => {
+ if (count == 0)
+ return tokensToSpan(tree, tok, tok + 2, tok);
+ tok += 1;
+ },
+ else => unreachable,
+ }
+ }
+ }
+ }
+ },
+ else => continue,
+ }
+ } else unreachable;
+ },
.node_offset_bin_lhs => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.declRelativeToNodeIndex(node_off);
@@ -3114,6 +3163,20 @@ pub const LazySrcLoc = union(enum) {
/// The source location points to the RHS of an assignment.
/// The Decl is determined contextually.
node_offset_store_operand: i32,
+ /// The source location points to a for loop input.
+ /// The Decl is determined contextually.
+ for_input: struct {
+ /// Points to the for loop AST node.
+ for_node_offset: i32,
+ /// Picks one of the inputs from the condition.
+ input_index: u32,
+ },
+ /// The source location points to one of the captures of a for loop, found
+ /// by taking this AST node index offset from the containing
+ /// Decl AST node, which points to one of the input nodes of a for loop.
+ /// Next, navigate to the corresponding capture.
+ /// The Decl is determined contextually.
+ for_capture_from_input: i32,
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
@@ -3200,6 +3263,8 @@ pub const LazySrcLoc = union(enum) {
.node_offset_init_ty,
.node_offset_store_ptr,
.node_offset_store_operand,
+ .for_input,
+ .for_capture_from_input,
=> .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
@@ -3553,7 +3618,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
}
if (data_has_safety_tag) {
const tags = zir.instructions.items(.tag);
- for (zir.instructions.items(.data)) |*data, i| {
+ for (zir.instructions.items(.data), 0..) |*data, i| {
const union_tag = Zir.Inst.Tag.data_tags[@enumToInt(tags[i])];
const as_struct = @ptrCast(*HackDataLayout, data);
as_struct.* = .{
@@ -3740,7 +3805,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
@ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
if (data_has_safety_tag) {
// The `Data` union has a safety tag but in the file format we store it without.
- for (file.zir.instructions.items(.data)) |*data, i| {
+ for (file.zir.instructions.items(.data), 0..) |*data, i| {
const as_struct = @ptrCast(*const HackDataLayout, data);
safety_buffer[i] = as_struct.data;
}
@@ -6293,7 +6358,7 @@ pub fn populateTestFunctions(
// Add a dependency on each test name and function pointer.
try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
- for (mod.test_functions.keys()) |test_decl_index, i| {
+ for (mod.test_functions.keys(), 0..) |test_decl_index, i| {
const test_decl = mod.declPtr(test_decl_index);
const test_name_slice = mem.sliceTo(test_decl.name, 0);
const test_name_decl_index = n: {
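
The new for_input and for_capture_from_input source locations above exist so that Sema can attach an error or note to one specific input or capture of a for loop rather than to the whole statement. For example, the length-mismatch diagnostic in zirForLen (further down, in the Sema.zig diff) pins one note to each offending input; a hypothetical sketch:

    const a: [3]u8 = .{ 1, 2, 3 };
    const b: [4]u8 = .{ 4, 5, 6, 7 };
    // error: non-matching for loop lengths
    // note: length 3 here   (pins input `a` via .for_input, input_index 0)
    // note: length 4 here   (pins input `b` via .for_input, input_index 1)
    for (a, b) |x, y| {
        _ = x;
        _ = y;
    }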
diff --git a/src/Package.zig b/src/Package.zig
index a3afe21009..5878e7bad6 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -207,7 +207,7 @@ pub fn fetchAndAddDependencies(
var any_error = false;
const deps_list = manifest.dependencies.values();
- for (manifest.dependencies.keys()) |name, i| {
+ for (manifest.dependencies.keys(), 0..) |name, i| {
const dep = deps_list[i];
const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index a5007ef7c8..7e501f984b 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -79,7 +79,7 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
const target = self.module.getTarget();
// look for gaps
- for (self.ranges.items[1..]) |cur, i| {
+ for (self.ranges.items[1..], 0..) |cur, i| {
// i starts counting from the second item.
const prev = self.ranges.items[i];
diff --git a/src/Sema.zig b/src/Sema.zig
index cf6350e35f..fcdb1ce518 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1035,6 +1035,7 @@ fn analyzeBodyInner(
.@"await" => try sema.zirAwait(block, inst),
.array_base_ptr => try sema.zirArrayBasePtr(block, inst),
.field_base_ptr => try sema.zirFieldBasePtr(block, inst),
+ .for_len => try sema.zirForLen(block, inst),
.clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
.ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
@@ -1059,15 +1060,16 @@ fn analyzeBodyInner(
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
- .add => try sema.zirArithmetic(block, inst, .add),
- .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
- .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
- .mul => try sema.zirArithmetic(block, inst, .mul),
- .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
- .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
- .sub => try sema.zirArithmetic(block, inst, .sub),
- .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
- .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
+ .add => try sema.zirArithmetic(block, inst, .add, true),
+ .addwrap => try sema.zirArithmetic(block, inst, .addwrap, true),
+ .add_sat => try sema.zirArithmetic(block, inst, .add_sat, true),
+ .add_unsafe => try sema.zirArithmetic(block, inst, .add_unsafe, false),
+ .mul => try sema.zirArithmetic(block, inst, .mul, true),
+ .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap, true),
+ .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat, true),
+ .sub => try sema.zirArithmetic(block, inst, .sub, true),
+ .subwrap => try sema.zirArithmetic(block, inst, .subwrap, true),
+ .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat, true),
.div => try sema.zirDiv(block, inst),
.div_exact => try sema.zirDivExact(block, inst),
@@ -3377,26 +3379,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
else
object_ty;
- if (!array_ty.isIndexable()) {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "type '{}' does not support indexing",
- .{array_ty.fmt(sema.mod)},
- );
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(
- block,
- src,
- msg,
- "for loop operand must be an array, slice, tuple, or vector",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- }
+ try checkIndexable(sema, block, src, array_ty);
return sema.fieldVal(block, src, object, "len", src);
}
@@ -3819,7 +3802,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
const empty_trash_count = trash_block.instructions.items.len;
- for (placeholders) |bitcast_inst, i| {
+ for (placeholders, 0..) |bitcast_inst, i| {
const sub_ptr_ty = sema.typeOf(Air.indexToRef(bitcast_inst));
if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) {
@@ -3919,6 +3902,121 @@ fn zirFieldBasePtr(
return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType());
}
+fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const gpa = sema.gpa;
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = sema.code.refSlice(extra.end, extra.data.operands_len);
+ const src = inst_data.src();
+
+ var len: Air.Inst.Ref = .none;
+ var len_val: ?Value = null;
+ var len_idx: u32 = undefined;
+ var any_runtime = false;
+
+ const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len);
+ defer gpa.free(runtime_arg_lens);
+
+ // First pass to look for comptime values.
+ for (args, 0..) |zir_arg, i_usize| {
+ const i = @intCast(u32, i_usize);
+ runtime_arg_lens[i] = .none;
+ if (zir_arg == .none) continue;
+ const object = try sema.resolveInst(zir_arg);
+ const object_ty = sema.typeOf(object);
+ // Each arg could be an indexable, or a range, in which case the length
+ // is passed directly as an integer.
+ const is_int = switch (object_ty.zigTypeTag()) {
+ .Int, .ComptimeInt => true,
+ else => false,
+ };
+ const arg_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = i,
+ } };
+ const arg_len_uncoerced = if (is_int) object else l: {
+ try checkIndexable(sema, block, arg_src, object_ty);
+ if (!object_ty.indexableHasLen()) continue;
+
+ break :l try sema.fieldVal(block, arg_src, object, "len", arg_src);
+ };
+ const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
+ if (len == .none) {
+ len = arg_len;
+ len_idx = i;
+ }
+ if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
+ if (len_val) |v| {
+ if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "non-matching for loop lengths", .{});
+ errdefer msg.destroy(gpa);
+ const a_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = len_idx,
+ } };
+ try sema.errNote(block, a_src, msg, "length {} here", .{
+ v.fmtValue(Type.usize, sema.mod),
+ });
+ try sema.errNote(block, arg_src, msg, "length {} here", .{
+ arg_val.fmtValue(Type.usize, sema.mod),
+ });
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+ } else {
+ len = arg_len;
+ len_val = arg_val;
+ len_idx = i;
+ }
+ continue;
+ }
+ runtime_arg_lens[i] = arg_len;
+ any_runtime = true;
+ }
+
+ if (len == .none) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "unbounded for loop", .{});
+ errdefer msg.destroy(gpa);
+ for (args, 0..) |zir_arg, i_usize| {
+ const i = @intCast(u32, i_usize);
+ if (zir_arg == .none) continue;
+ const object = try sema.resolveInst(zir_arg);
+ const object_ty = sema.typeOf(object);
+ // Each arg could be an indexable, or a range, in which case the length
+ // is passed directly as an integer.
+ switch (object_ty.zigTypeTag()) {
+ .Int, .ComptimeInt => continue,
+ else => {},
+ }
+ const arg_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = i,
+ } };
+ try sema.errNote(block, arg_src, msg, "type '{}' has no upper bound", .{
+ object_ty.fmt(sema.mod),
+ });
+ }
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ // Now for the runtime checks.
+ if (any_runtime and block.wantSafety()) {
+ for (runtime_arg_lens, 0..) |arg_len, i| {
+ if (arg_len == .none) continue;
+ if (i == len_idx) continue;
+ const ok = try block.addBinOp(.cmp_eq, len, arg_len);
+ try sema.addSafetyCheck(block, ok, .for_len_mismatch);
+ }
+ }
+
+ return len;
+}
+
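
When an input length is only known at runtime, zirForLen above picks one length and, in safe builds, guards every other runtime length with a cmp_eq check wired to the for_len_mismatch safety panic. A minimal sketch of code that exercises that path, assuming hypothetical slice parameters:

    fn sum(a: []const u8, b: []const u8) u32 {
        var total: u32 = 0;
        // a.len becomes the loop length; b.len is compared against it with
        // cmp_eq, and a mismatch panics (for_len_mismatch) in safe builds.
        for (a, b) |x, y| {
            total += @as(u32, x) + y;
        }
        return total;
    }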
fn validateArrayInitTy(
sema: *Sema,
block: *Block,
@@ -4198,7 +4296,7 @@ fn validateStructInit(
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
- for (found_fields) |field_ptr, i| {
+ for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const default_val = struct_ty.structFieldDefaultValue(i);
@@ -4264,7 +4362,7 @@ fn validateStructInit(
// ends up being comptime-known.
const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount());
- field: for (found_fields) |field_ptr, i| {
+ field: for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) {
// Determine whether the value stored to this pointer is comptime-known.
const field_ty = struct_ty.structFieldType(i);
@@ -4397,7 +4495,7 @@ fn validateStructInit(
try sema.resolveStructLayout(struct_ty);
// Our task is to insert `store` instructions for all the default field values.
- for (found_fields) |field_ptr, i| {
+ for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const field_src = init_src; // TODO better source location
@@ -4472,7 +4570,7 @@ fn zirValidateArrayInit(
// any ZIR instructions at comptime; we need to do that here.
if (array_ty.sentinel()) |sentinel_val| {
const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len);
- const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true);
+ const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val);
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
}
@@ -4491,7 +4589,7 @@ fn zirValidateArrayInit(
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
- outer: for (instrs) |elem_ptr, i| {
+ outer: for (instrs, 0..) |elem_ptr, i| {
// Determine whether the value stored to this pointer is comptime-known.
if (array_ty.isTuple()) {
@@ -5010,7 +5108,7 @@ fn zirCompileLog(
const src_node = extra.data.src_node;
const args = sema.code.refSlice(extra.end, extended.small);
- for (args) |arg_ref, i| {
+ for (args, 0..) |arg_ref, i| {
if (i != 0) try writer.print(", ", .{});
const arg = try sema.resolveInst(arg_ref);
@@ -6228,7 +6326,7 @@ const GenericCallAdapter = struct {
if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false;
const other_comptime_args = other_key.comptime_args.?;
- for (other_comptime_args[0..ctx.func_ty_info.param_types.len]) |other_arg, i| {
+ for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| {
const this_arg = ctx.args[i];
const this_is_comptime = this_arg.val.tag() != .generic_poison;
const other_is_comptime = other_arg.val.tag() != .generic_poison;
@@ -6744,7 +6842,7 @@ fn analyzeCall(
assert(!func_ty_info.is_generic);
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
- for (uncasted_args) |uncasted_arg, i| {
+ for (uncasted_args, 0..) |uncasted_arg, i| {
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
@@ -7519,7 +7617,7 @@ fn resolveGenericInstantiationType(
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
if (!ty.isSimpleTupleOrAnonStruct()) return;
const tuple = ty.tupleFields();
- for (tuple.values) |field_val, i| {
+ for (tuple.values, 0..) |field_val, i| {
try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
if (field_val.tag() == .unreachable_value) continue;
try sema.resolveLazyValue(field_val);
@@ -8593,7 +8691,7 @@ fn funcCommon(
const cc_resolved = cc orelse .Unspecified;
const param_types = try sema.arena.alloc(Type, block.params.items.len);
const comptime_params = try sema.arena.alloc(bool, block.params.items.len);
- for (block.params.items) |param, i| {
+ for (block.params.items, 0..) |param, i| {
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @truncate(u1, noalias_bits >> index) != 0;
@@ -8702,7 +8800,7 @@ fn funcCommon(
const tags = sema.code.instructions.items(.tag);
const data = sema.code.instructions.items(.data);
const param_body = sema.code.getParamBody(func_inst);
- for (block.params.items) |param, i| {
+ for (block.params.items, 0..) |param, i| {
if (!param.is_comptime) {
const param_index = param_body[i];
const param_src = switch (tags[param_index]) {
@@ -9619,7 +9717,7 @@ fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemVal(block, src, array, elem_index, src);
+ return sema.elemVal(block, src, array, elem_index, src, false);
}
fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9632,7 +9730,7 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemVal(block, src, array, elem_index, elem_index_src);
+ return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}
fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9644,7 +9742,22 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, false);
+ const indexable_ty = sema.typeOf(array_ptr);
+ if (indexable_ty.zigTypeTag() != .Pointer) {
+ const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
+ const msg = msg: {
+ const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{
+ indexable_ty.fmt(sema.mod),
+ });
+ errdefer msg.destroy(sema.gpa);
+ if (indexable_ty.zigTypeTag() == .Array) {
+ try sema.errNote(block, src, msg, "consider using '&' here", .{});
+ }
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+ return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false, false);
}
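
The guard added to zirElemPtr above turns a pointer capture over a non-pointer operand into a compile error, with a note suggesting `&` when the operand is an array. A sketch, assuming a hypothetical mutable array:

    var values = [_]u32{ 1, 2, 3 };
    // for (values) |*v| {}  // error: pointer capture of non pointer type '[3]u32'
    //                       // note: consider using '&' here
    for (&values) |*v| {
        v.* += 1;
    }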
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9657,7 +9770,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false);
+ return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}
fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9669,7 +9782,7 @@ fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
const elem_index = try sema.addIntUnsigned(Type.usize, extra.index);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, true);
+ return sema.elemPtr(block, src, array_ptr, elem_index, src, true, true);
}
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9801,7 +9914,7 @@ fn zirSwitchCapture(
const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?);
const first_field = union_obj.fields.values()[first_field_index];
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
const item_ref = try sema.resolveInst(item);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
@@ -10131,7 +10244,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
@@ -10165,7 +10278,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.{},
);
errdefer msg.destroy(sema.gpa);
- for (seen_enum_fields) |seen_src, i| {
+ for (seen_enum_fields, 0..) |seen_src, i| {
if (seen_src != null) continue;
const field_name = operand_ty.enumFieldName(i);
@@ -10227,7 +10340,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemError(
block,
&seen_errors,
@@ -10369,7 +10482,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItem(
block,
&range_set,
@@ -10464,7 +10577,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemBool(
block,
&true_count,
@@ -10548,7 +10661,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemSparse(
block,
&seen_values,
@@ -10859,7 +10972,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
}
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
cases_len += 1;
const item = try sema.resolveInst(item_ref);
@@ -11045,7 +11158,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
operand_ty.fmt(sema.mod),
});
}
- for (seen_enum_fields) |f, i| {
+ for (seen_enum_fields, 0..) |f, i| {
if (f != null) continue;
cases_len += 1;
@@ -11188,7 +11301,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
const analyze_body = if (union_originally and !special.is_inline)
- for (seen_enum_fields) |seen_field, index| {
+ for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
const field_ty = union_obj.fields.values()[index].ty;
@@ -12168,7 +12281,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
}
@@ -12434,14 +12547,14 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
while (elem_i < lhs_len) : (elem_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = try sema.elemVal(block, lhs_src, lhs, elem_index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, elem_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
while (elem_i < result_len) : (elem_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
const rhs_index = try sema.addIntUnsigned(Type.usize, elem_i - lhs_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = try sema.elemVal(block, rhs_src, rhs, rhs_index, src);
+ const init = try sema.elemVal(block, rhs_src, rhs, rhs_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, rhs_src, .store);
}
if (res_sent_val) |sent_val| {
@@ -12459,12 +12572,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: usize = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
const index = try sema.addIntUnsigned(Type.usize, elem_i);
- const init = try sema.elemVal(block, lhs_src, lhs, index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, index, src, true);
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, lhs_src);
}
while (elem_i < result_len) : (elem_i += 1) {
const index = try sema.addIntUnsigned(Type.usize, elem_i - lhs_len);
- const init = try sema.elemVal(block, rhs_src, rhs, index, src);
+ const init = try sema.elemVal(block, rhs_src, rhs, index, src, true);
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, rhs_src);
}
}
@@ -12684,7 +12797,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
elem_i += 1;
const lhs_index = try sema.addIntUnsigned(Type.usize, lhs_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
}
@@ -12704,7 +12817,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
const lhs_index = try sema.addIntUnsigned(Type.usize, lhs_i);
- const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
element_refs[elem_i] = init;
elem_i += 1;
}
@@ -12776,6 +12889,7 @@ fn zirArithmetic(
block: *Block,
inst: Zir.Inst.Index,
zir_tag: Zir.Inst.Tag,
+ safety: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -12788,7 +12902,7 @@ fn zirArithmetic(
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
- return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, true);
+ return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, safety);
}
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13612,7 +13726,7 @@ fn intRem(
) CompileError!Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -14139,7 +14253,7 @@ fn analyzeArithmetic(
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
switch (zir_tag) {
- .add => {
+ .add, .add_unsafe => {
                // For integers:
// If either of the operands are zero, then the other operand is
// returned, even if it is undefined.
@@ -14722,7 +14836,7 @@ fn zirAsm(
const outputs = try sema.arena.alloc(ConstraintName, outputs_len);
var expr_ty = Air.Inst.Ref.void_type;
- for (out_args) |*arg, out_i| {
+ for (out_args, 0..) |*arg, out_i| {
const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
extra_i = output.end;
@@ -14749,7 +14863,7 @@ fn zirAsm(
const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
- for (args) |*arg, arg_i| {
+ for (args, 0..) |*arg, arg_i| {
const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
extra_i = input.end;
@@ -15473,7 +15587,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer params_anon_decl.deinit();
const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
- for (param_vals) |*param_val, i| {
+ for (param_vals, 0..) |*param_val, i| {
const param_ty = info.param_types[i];
const is_generic = param_ty.tag() == .generic_poison;
const param_ty_val = if (is_generic)
@@ -15717,7 +15831,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: {
const names = ty.errorSetNames();
const vals = try fields_anon_decl.arena().alloc(Value, names.len);
- for (vals) |*field_val, i| {
+ for (vals, 0..) |*field_val, i| {
const name = names[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
@@ -15819,7 +15933,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const enum_fields = ty.enumFields();
const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count());
- for (enum_field_vals) |*field_val, i| {
+ for (enum_field_vals, 0..) |*field_val, i| {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, i),
@@ -15916,7 +16030,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const union_fields = union_ty.unionFields();
const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());
- for (union_field_vals) |*field_val, i| {
+ for (union_field_vals, 0..) |*field_val, i| {
const field = union_fields.values()[i];
const name = union_fields.keys()[i];
const name_val = v: {
@@ -16025,7 +16139,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tuple = struct_ty.tupleFields();
const field_types = tuple.types;
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, field_types.len);
- for (struct_field_vals) |*struct_field_val, i| {
+ for (struct_field_vals, 0..) |*struct_field_val, i| {
const field_ty = field_types[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
@@ -16069,7 +16183,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const struct_fields = struct_ty.structFields();
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count());
- for (struct_field_vals) |*field_val, i| {
+ for (struct_field_vals, 0..) |*field_val, i| {
const field = struct_fields.values()[i];
const name = struct_fields.keys()[i];
const name_val = v: {
@@ -16408,7 +16522,7 @@ fn zirTypeofPeer(
const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
defer sema.gpa.free(inst_list);
- for (args) |arg_ref, i| {
+ for (args, 0..) |arg_ref, i| {
inst_list[i] = try sema.resolveInst(arg_ref);
}
@@ -17519,7 +17633,7 @@ fn finishStructInit(
if (struct_ty.isAnonStruct()) {
const struct_obj = struct_ty.castTag(.anon_struct).?.data;
- for (struct_obj.values) |default_val, i| {
+ for (struct_obj.values, 0..) |default_val, i| {
if (field_inits[i] != .none) continue;
if (default_val.tag() == .unreachable_value) {
@@ -17555,7 +17669,7 @@ fn finishStructInit(
}
} else {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
if (field_inits[i] != .none) continue;
if (field.default_val.tag() == .unreachable_value) {
@@ -17596,7 +17710,7 @@ fn finishStructInit(
if (is_comptime) {
const values = try sema.arena.alloc(Value, field_inits.len);
- for (field_inits) |field_init, i| {
+ for (field_inits, 0..) |field_init, i| {
values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?;
}
const struct_val = try Value.Tag.aggregate.create(sema.arena, values);
@@ -17611,7 +17725,7 @@ fn finishStructInit(
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
- for (field_inits) |field_init, i_usize| {
+ for (field_inits, 0..) |field_init, i_usize| {
const i = @intCast(u32, i_usize);
const field_src = dest_src;
const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true);
@@ -17644,7 +17758,7 @@ fn zirStructInitAnon(
const opt_runtime_index = rs: {
var runtime_index: ?usize = null;
var extra_index = extra.end;
- for (types) |*field_ty, i| {
+ for (types, 0..) |*field_ty, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
@@ -17718,7 +17832,7 @@ fn zirStructInitAnon(
});
const alloc = try block.addTy(.alloc, alloc_ty);
var extra_index = extra.end;
- for (types) |field_ty, i_usize| {
+ for (types, 0..) |field_ty, i_usize| {
const i = @intCast(u32, i_usize);
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
@@ -17740,7 +17854,7 @@ fn zirStructInitAnon(
const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len);
var extra_index = extra.end;
- for (types) |_, i| {
+ for (types, 0..) |_, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
element_refs[i] = try sema.resolveInst(item.data.init);
@@ -17768,7 +17882,7 @@ fn zirArrayInit(
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
defer gpa.free(resolved_args);
- for (args[1..]) |arg, i| {
+ for (args[1..], 0..) |arg, i| {
const resolved_arg = try sema.resolveInst(arg);
const elem_ty = if (array_ty.zigTypeTag() == .Struct)
array_ty.structFieldType(i)
@@ -17789,7 +17903,7 @@ fn zirArrayInit(
resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some);
}
- const opt_runtime_index: ?u32 = for (resolved_args) |arg, i| {
+ const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
const comptime_known = try sema.isComptimeKnown(arg);
if (!comptime_known) break @intCast(u32, i);
} else null;
@@ -17797,7 +17911,7 @@ fn zirArrayInit(
const runtime_index = opt_runtime_index orelse {
const elem_vals = try sema.arena.alloc(Value, resolved_args.len);
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
// We checked that all args are comptime above.
elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?;
}
@@ -17826,7 +17940,7 @@ fn zirArrayInit(
const alloc = try block.addTy(.alloc, alloc_ty);
if (array_ty.isTuple()) {
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
@@ -17848,7 +17962,7 @@ fn zirArrayInit(
});
const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
const index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
@@ -17875,7 +17989,7 @@ fn zirArrayInitAnon(
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem);
@@ -17918,7 +18032,7 @@ fn zirArrayInitAnon(
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
- for (operands) |operand, i_usize| {
+ for (operands, 0..) |operand, i_usize| {
const i = @intCast(u32, i_usize);
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
@@ -17935,7 +18049,7 @@ fn zirArrayInitAnon(
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
element_refs[i] = try sema.resolveInst(operand);
}
@@ -18138,7 +18252,7 @@ fn zirUnaryMath(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
}
@@ -19142,7 +19256,7 @@ fn reifyStruct(
if (layout == .Packed) {
struct_obj.status = .layout_wip;
- for (struct_obj.fields.values()) |field, index| {
+ for (struct_obj.fields.values(), 0..) |field, index| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -19771,7 +19885,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
}
@@ -19873,7 +19987,7 @@ fn zirBitCount(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
const scalar_ty = operand_ty.scalarType();
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
const count = comptimeOp(elem_val, scalar_ty, target);
elem.* = try Value.Tag.int_u64.create(sema.arena, count);
@@ -19942,7 +20056,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const vec_len = operand_ty.vectorLen();
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena);
}
@@ -19991,7 +20105,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const vec_len = operand_ty.vectorLen();
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
}
@@ -20060,7 +20174,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
.Packed => {
var bit_sum: u64 = 0;
const fields = ty.structFields();
- for (fields.values()) |field, i| {
+ for (fields.values(), 0..) |field, i| {
if (i == field_index) {
return bit_sum;
}
@@ -20997,7 +21111,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
var buf: Value.ElemValueBuffer = undefined;
const elems = try sema.gpa.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf);
const should_choose_a = pred_elem_val.toBool();
if (should_choose_a) {
@@ -21347,12 +21461,12 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
func = bound_data.func_inst;
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount() + 1);
resolved_args[0] = bound_data.arg0_inst;
- for (resolved_args[1..]) |*resolved, i| {
+ for (resolved_args[1..], 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
} else {
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount());
- for (resolved_args) |*resolved, i| {
+ for (resolved_args, 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
}
@@ -21507,7 +21621,7 @@ fn analyzeMinMax(
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const lhs_elem_val = lhs_val.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem_val = rhs_val.elemValueBuffer(sema.mod, i, &rhs_buf);
elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
@@ -22404,7 +22518,7 @@ fn explainWhyTypeIsComptimeInner(
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{
.index = i,
.range = .type,
@@ -22424,7 +22538,7 @@ fn explainWhyTypeIsComptimeInner(
if (ty.cast(Type.Payload.Union)) |payload| {
const union_obj = payload.data;
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{
.index = i,
.range = .type,
@@ -22676,6 +22790,7 @@ pub const PanicId = enum {
unwrap_error,
index_out_of_bounds,
start_index_greater_than_end,
+ for_len_mismatch,
};
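
Editor's note, not part of the diff: `for_len_mismatch` backs the new runtime safety check for multi-object loops. A hypothetical program that would trip it (the panic id is from the enum above; exact panic wording is an assumption):

```zig
const std = @import("std");

pub fn main() void {
    var n: usize = 2; // runtime-known, so the check cannot fold at comptime
    const a = [_]u8{ 1, 2, 3 };
    const b = [_]u8{ 4, 5, 6 };
    // a[0..] has length 3 but b[0..n] has runtime length 2, so with
    // runtime safety enabled the loop panics via for_len_mismatch.
    for (a[0..], b[0..n]) |x, y| {
        std.debug.print("{d} {d}\n", .{ x, y });
    }
}
```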
fn addSafetyCheck(
@@ -23694,7 +23809,7 @@ fn structFieldPtrByIndex(
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |f, i| {
+ for (struct_obj.fields.values(), 0..) |f, i| {
if (!(try sema.typeHasRuntimeBits(f.ty))) continue;
if (i == field_index) {
@@ -24057,6 +24172,7 @@ fn elemPtr(
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
@@ -24065,46 +24181,61 @@ fn elemPtr(
.Pointer => indexable_ptr_ty.elemType(),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
};
- if (!indexable_ty.isIndexable()) {
- return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)});
- }
-
switch (indexable_ty.zigTypeTag()) {
- .Pointer => {
- // In all below cases, we have to deref the ptr operand to get the actual indexable pointer.
- const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
- switch (indexable_ty.ptrSize()) {
- .Slice => return sema.elemPtrSlice(block, src, indexable_ptr_src, indexable, elem_index_src, elem_index),
- .Many, .C => {
- const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_ptr_src, indexable);
- const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
- const runtime_src = rs: {
- const ptr_val = maybe_ptr_val orelse break :rs indexable_ptr_src;
- const index_val = maybe_index_val orelse break :rs elem_index_src;
- const index = @intCast(usize, index_val.toUnsignedInt(target));
- const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
- const result_ty = try sema.elemPtrType(indexable_ty, index);
- return sema.addConstant(result_ty, elem_ptr);
- };
- const result_ty = try sema.elemPtrType(indexable_ty, null);
-
- try sema.requireRuntimeBlock(block, src, runtime_src);
- return block.addPtrElemPtr(indexable, elem_index, result_ty);
- },
- .One => {
- assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- return sema.elemPtrArray(block, src, indexable_ptr_src, indexable, elem_index_src, elem_index, init);
- },
- }
- },
- .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init),
+ .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
const index = @intCast(u32, index_val.toUnsignedInt(target));
return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
},
- else => unreachable,
+ else => {
+ const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
+ return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init, oob_safety);
+ },
+ }
+}
+
+/// Asserts that the type of indexable is pointer.
+fn elemPtrOneLayerOnly(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ indexable: Air.Inst.Ref,
+ elem_index: Air.Inst.Ref,
+ elem_index_src: LazySrcLoc,
+ init: bool,
+ oob_safety: bool,
+) CompileError!Air.Inst.Ref {
+ const indexable_src = src; // TODO better source location
+ const indexable_ty = sema.typeOf(indexable);
+ if (!indexable_ty.isIndexable()) {
+ return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)});
+ }
+ const target = sema.mod.getTarget();
+
+ switch (indexable_ty.ptrSize()) {
+ .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
+ .Many, .C => {
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ const runtime_src = rs: {
+ const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
+ const result_ty = try sema.elemPtrType(indexable_ty, index);
+ return sema.addConstant(result_ty, elem_ptr);
+ };
+ const result_ty = try sema.elemPtrType(indexable_ty, null);
+
+ try sema.requireRuntimeBlock(block, src, runtime_src);
+ return block.addPtrElemPtr(indexable, elem_index, result_ty);
+ },
+ .One => {
+ assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
+ return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
+ },
}
}
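
Editor's note, not part of the diff: the new `oob_safety` flag lets for-loop lowering skip per-element bounds checks that the loop's single length check has already made redundant. A rough user-level analogue of that reasoning (`sumPairs` is an illustrative name, not compiler code):

```zig
const std = @import("std");

fn sumPairs(a: []const u32, b: []const u32) u64 {
    // One up-front length check, as for_len performs for the loop...
    std.debug.assert(a.len == b.len);
    var total: u64 = 0;
    var i: usize = 0;
    while (i < a.len) : (i += 1) {
        // ...after which every a[i] and b[i] is provably in bounds,
        // so a per-element check would be pure overhead. Passing
        // oob_safety=false to elemVal/elemPtr expresses exactly that.
        total += a[i] + b[i];
    }
    return total;
}
```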
@@ -24115,6 +24246,7 @@ fn elemVal(
indexable: Air.Inst.Ref,
elem_index_uncasted: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
@@ -24130,7 +24262,7 @@ fn elemVal(
switch (indexable_ty.zigTypeTag()) {
.Pointer => switch (indexable_ty.ptrSize()) {
- .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index),
+ .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
@@ -24151,14 +24283,14 @@ fn elemVal(
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false);
+ const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
},
- .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index),
+ .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Vector => {
// TODO: If the index is a vector, the result should be a vector.
- return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index);
+ return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety);
},
.Struct => {
// Tuple field access.
@@ -24303,6 +24435,7 @@ fn elemValArray(
array: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const array_ty = sema.typeOf(array);
const array_sent = array_ty.sentinel();
@@ -24346,7 +24479,7 @@ fn elemValArray(
const runtime_src = if (maybe_undef_array_val != null) elem_index_src else array_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety()) {
+ if (oob_safety and block.wantSafety()) {
// Runtime check is only needed if unable to comptime check
if (maybe_index_val == null) {
const len_inst = try sema.addIntUnsigned(Type.usize, array_len);
@@ -24366,6 +24499,7 @@ fn elemPtrArray(
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
init: bool,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const target = sema.mod.getTarget();
const array_ptr_ty = sema.typeOf(array_ptr);
@@ -24409,7 +24543,7 @@ fn elemPtrArray(
try sema.requireRuntimeBlock(block, src, runtime_src);
// Runtime check is only needed if unable to comptime check.
- if (block.wantSafety() and offset == null) {
+ if (oob_safety and block.wantSafety() and offset == null) {
const len_inst = try sema.addIntUnsigned(Type.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, elem_index, len_inst, cmp_op);
@@ -24426,6 +24560,7 @@ fn elemValSlice(
slice: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel() != null;
@@ -24462,7 +24597,7 @@ fn elemValSlice(
try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, slice_ty, slice_src);
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety()) {
+ if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod))
else
@@ -24482,6 +24617,7 @@ fn elemPtrSlice(
slice: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const target = sema.mod.getTarget();
const slice_ty = sema.typeOf(slice);
@@ -24519,7 +24655,7 @@ fn elemPtrSlice(
const runtime_src = if (maybe_undef_slice_val != null) elem_index_src else slice_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety()) {
+ if (oob_safety and block.wantSafety()) {
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef())
@@ -25980,7 +26116,7 @@ fn coerceInMemoryAllowedFns(
} };
}
- for (dest_info.param_types) |dest_param_ty, i| {
+ for (dest_info.param_types, 0..) |dest_param_ty, i| {
const src_param_ty = src_info.param_types[i];
if (dest_info.comptime_params[i] != src_info.comptime_params[i]) {
@@ -26224,7 +26360,7 @@ fn storePtr2(
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
const elem_index = try sema.addIntUnsigned(Type.usize, i);
- const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false);
+ const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
@@ -26510,7 +26646,7 @@ fn beginComptimePtrMutation(
// assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
assert(bytes.len >= dest_len);
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
}
@@ -26539,7 +26675,7 @@ fn beginComptimePtrMutation(
const dest_len = parent.ty.arrayLenIncludingSentinel();
const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
- for (bytes) |byte, i| {
+ for (bytes, 0..) |byte, i| {
elems[i] = try Value.Tag.int_u64.create(arena, byte);
}
if (parent.ty.sentinel()) |sent_val| {
@@ -27510,7 +27646,7 @@ fn coerceEnumToUnion(
var msg: ?*Module.ErrorMsg = null;
errdefer if (msg) |some| some.destroy(sema.gpa);
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
if (field.ty.zigTypeTag() == .NoReturn) {
const err_msg = msg orelse try sema.errMsg(
block,
@@ -27669,14 +27805,14 @@ fn coerceArrayLike(
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
var runtime_src: ?LazySrcLoc = null;
- for (element_vals) |*elem, i| {
+ for (element_vals, 0..) |*elem, i| {
const index_ref = try sema.addConstant(
Type.usize,
try Value.Tag.int_u64.create(sema.arena, i),
);
const src = inst_src; // TODO better source location
const elem_src = inst_src; // TODO better source location
- const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref);
+ const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
element_refs[i] = coerced;
if (runtime_src == null) {
@@ -27731,7 +27867,7 @@ fn coerceTupleToArray(
const dest_elem_ty = dest_ty.childType();
var runtime_src: ?LazySrcLoc = null;
- for (element_vals) |*elem, i_usize| {
+ for (element_vals, 0..) |*elem, i_usize| {
const i = @intCast(u32, i_usize);
if (i_usize == inst_len) {
elem.* = dest_ty.sentinel().?;
@@ -27860,7 +27996,7 @@ fn coerceTupleToStruct(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs) |*field_ref, i| {
+ for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const field_name = fields.keys()[i];
@@ -27958,7 +28094,7 @@ fn coerceTupleToTuple(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs) |*field_ref, i| {
+ for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const default_val = tuple_ty.structFieldDefaultValue(i);
@@ -29334,7 +29470,7 @@ fn resolvePeerTypes(
var seen_const = false;
var convert_to_slice = false;
var chosen_i: usize = 0;
- for (instructions[1..]) |candidate, candidate_i| {
+ for (instructions[1..], 0..) |candidate, candidate_i| {
const candidate_ty = sema.typeOf(candidate);
const chosen_ty = sema.typeOf(chosen);
@@ -29993,7 +30129,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
};
struct_obj.status = .layout_wip;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -30031,7 +30167,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count());
};
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
optimized_order[i] = if (field.ty.hasRuntimeBits())
@intCast(u32, i)
else
@@ -30191,6 +30327,29 @@ fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_
}
}
+fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, array_ty: Type) !void {
+ if (!array_ty.isIndexable()) {
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ src,
+ "type '{}' does not support indexing",
+ .{array_ty.fmt(sema.mod)},
+ );
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(
+ block,
+ src,
+ msg,
+ "for loop operand must be an array, slice, tuple, or vector",
+ .{},
+ );
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+}
+
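
Editor's note, not part of the diff: `checkIndexable` produces the diagnostic for a for loop over an operand that cannot be indexed. A sketch of code that would hit it, with the error text taken from the function above:

```zig
// Does not compile; expected diagnostic:
//   error: type 'u32' does not support indexing
//   note: for loop operand must be an array, slice, tuple, or vector
pub fn main() void {
    const x: u32 = 5;
    for (x) |digit| {
        _ = digit;
    }
}
```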
fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
const resolved_ty = try sema.resolveTypeFields(ty);
const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
@@ -30213,7 +30372,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
};
union_obj.status = .layout_wip;
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -30361,7 +30520,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) {
return true;
@@ -30876,7 +31035,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
// so that init values may depend on type layout.
const bodies_index = extra_index;
- for (fields) |zir_field, field_i| {
+ for (fields, 0..) |zir_field, field_i| {
const field_ty: Type = ty: {
if (zir_field.type_ref != .none) {
break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
@@ -30998,7 +31157,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (any_inits) {
extra_index = bodies_index;
- for (fields) |zir_field, field_i| {
+ for (fields, 0..) |zir_field, field_i| {
extra_index += zir_field.type_body_len;
extra_index += zir_field.align_body_len;
if (zir_field.init_body_len > 0) {
@@ -31718,7 +31877,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.@"struct" => {
const resolved_ty = try sema.resolveTypeFields(ty);
const s = resolved_ty.castTag(.@"struct").?.data;
- for (s.fields.values()) |field, i| {
+ for (s.fields.values(), 0..) |field, i| {
if (field.is_comptime) continue;
if (field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
@@ -31739,7 +31898,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val, i| {
+ for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue;
@@ -32379,7 +32538,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) {
return true;
@@ -32539,7 +32698,7 @@ fn anonStructFieldIndex(
field_src: LazySrcLoc,
) !u32 {
const anon_struct = struct_ty.castTag(.anon_struct).?.data;
- for (anon_struct.names) |name, i| {
+ for (anon_struct.names, 0..) |name, i| {
if (mem.eql(u8, name, field_name)) {
return @intCast(u32, i);
}
@@ -32557,7 +32716,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32615,7 +32774,7 @@ fn intSub(
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32673,7 +32832,7 @@ fn floatAdd(
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32730,7 +32889,7 @@ fn floatSub(
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32788,7 +32947,7 @@ fn intSubWithOverflow(
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32842,7 +33001,7 @@ fn floatToInt(
if (float_ty.zigTypeTag() == .Vector) {
const elem_ty = float_ty.childType();
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
@@ -33042,7 +33201,7 @@ fn intFitsInType(
.aggregate => {
assert(ty.zigTypeTag() == .Vector);
- for (val.castTag(.aggregate).?.data) |elem, i| {
+ for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) {
if (vector_index) |some| some.* = i;
return false;
@@ -33139,7 +33298,7 @@ fn intAddWithOverflow(
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -33243,7 +33402,7 @@ fn compareVector(
) !Value {
assert(ty.zigTypeTag() == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
diff --git a/src/Zir.zig b/src/Zir.zig
index 58f9fdff14..4dd2386c51 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -137,6 +137,8 @@ pub const Inst = struct {
/// Saturating addition.
/// Uses the `pl_node` union field. Payload is `Bin`.
add_sat,
+ /// The same as `add` except no safety check.
+ add_unsafe,
/// Arithmetic subtraction. Asserts no integer overflow.
/// Uses the `pl_node` union field. Payload is `Bin`.
sub,
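
Editor's note, not part of the diff: `add_unsafe` exists so the hidden for-loop counter can be incremented without an overflow check, since it is bounded by a length that already fits in `usize`. A loose user-level analogue:

```zig
// Hypothetical helper, not compiler code. add_unsafe lowers like
// `add` minus the overflow safety check; the closest user-level
// analogue is a wrapping add on a sum already proven in range.
fn addUnchecked(a: usize, b: usize) usize {
    return a +% b; // no overflow panic is emitted
}
```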
@@ -382,18 +384,24 @@ pub const Inst = struct {
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_ptr_node,
 /// Same as `elem_ptr_node` but used only for for loops.
- /// Uses the `pl_node` union field. AST node is the condition of a for loop. Payload is `Bin`.
+ /// Uses the `pl_node` union field. AST node is the condition of a for loop.
+ /// Payload is `Bin`.
+ /// No OOB safety check is emitted.
elem_ptr,
/// Same as `elem_ptr_node` except the index is stored immediately rather than
/// as a reference to another ZIR instruction.
/// Uses the `pl_node` union field. AST node is an element inside array initialization
/// syntax. Payload is `ElemPtrImm`.
+ /// This instruction has a way to set the result type to be a
+ /// single-pointer or a many-pointer.
elem_ptr_imm,
/// Given an array, slice, or pointer, returns the element at the provided index.
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_val_node,
 /// Same as `elem_val_node` but used only for for loops.
- /// Uses the `pl_node` union field. AST node is the condition of a for loop. Payload is `Bin`.
+ /// Uses the `pl_node` union field. AST node is the condition of a for loop.
+ /// Payload is `Bin`.
+ /// No OOB safety check is emitted.
elem_val,
/// Emits a compile error if the operand is not `void`.
/// Uses the `un_node` field.
@@ -497,6 +505,15 @@ pub const Inst = struct {
/// Sends comptime control flow back to the beginning of the current block.
/// Uses the `node` field.
repeat_inline,
+ /// Asserts that all the lengths provided match. Used to build a for loop.
+ /// Return value is the length as a usize.
+ /// Uses the `pl_node` field with payload `MultiOp`.
+ /// There is exactly one item corresponding to each AST node inside the for
+ /// loop condition. Any item may be `none`, indicating an unbounded range.
+ /// Illegal behaviors:
+ /// * If all lengths are unbounded ranges (always a compile error).
+ /// * If any two lengths do not match each other.
+ for_len,
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
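
Editor's note, not part of the diff: `for_len` receives one length per loop operand, with `none` for unbounded ranges. A sketch of the source shapes it validates, assuming the semantics documented above:

```zig
test "for_len accepts matching lengths" {
    const a = [_]u8{ 1, 2, 3 };
    const b = [_]u8{ 4, 5, 6 };
    // OK: both bounded lengths are 3; the `0..` range is unbounded
    // and contributes no length of its own.
    for (a, b, 0..) |x, y, i| {
        _ = x;
        _ = y;
        _ = i;
    }
    // Always a compile error: every operand is an unbounded range.
    // for (0.., 1..) |i, j| {}
}
```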
@@ -1008,6 +1025,7 @@ pub const Inst = struct {
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
@@ -1242,6 +1260,7 @@ pub const Inst = struct {
.defer_err_code,
.save_err_ret_index,
.restore_err_ret_index,
+ .for_len,
=> false,
.@"break",
@@ -1322,6 +1341,7 @@ pub const Inst = struct {
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
@@ -1533,6 +1553,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .for_len,
.@"try",
.try_ptr,
//.try_inline,
@@ -1553,6 +1574,7 @@ pub const Inst = struct {
.add = .pl_node,
.addwrap = .pl_node,
.add_sat = .pl_node,
+ .add_unsafe = .pl_node,
.sub = .pl_node,
.subwrap = .pl_node,
.sub_sat = .pl_node,
@@ -1588,6 +1610,7 @@ pub const Inst = struct {
.@"break" = .@"break",
.break_inline = .@"break",
.check_comptime_control_flow = .un_node,
+ .for_len = .pl_node,
.call = .pl_node,
.cmp_lt = .pl_node,
.cmp_lte = .pl_node,
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 473a62fd83..5b0db30757 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -515,7 +515,7 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
- for (self.args) |*arg, arg_index| {
+ for (self.args, 0..) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
@@ -1633,14 +1633,14 @@ fn allocRegs(
var reused_read_arg: ?usize = null;
// Lock all args which are already allocated to registers
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
read_locks[i] = self.register_manager.lockReg(mcv.register);
}
}
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
}
@@ -1648,7 +1648,7 @@ fn allocRegs(
// Allocate registers for all args which aren't allocated to
// registers yet
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
const raw_reg = mcv.register;
@@ -1672,7 +1672,7 @@ fn allocRegs(
const raw_reg = arg.bind.reg;
arg.reg.* = self.registerAlias(raw_reg, arg.ty);
} else {
- reuse_operand: for (read_args) |read_arg, i| {
+ reuse_operand: for (read_args, 0..) |read_arg, i| {
if (read_arg.bind == .inst) {
const operand = read_arg.bind.inst;
const mcv = try self.resolveInst(operand);
@@ -1694,7 +1694,7 @@ fn allocRegs(
}
}
} else {
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
const raw_reg = arg.bind.reg;
arg.reg.* = self.registerAlias(raw_reg, arg.ty);
@@ -1708,7 +1708,7 @@ fn allocRegs(
// For all read_args which need to be moved from non-register to
// register, perform the move
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
if (reused_read_arg) |j| {
// Check whether this read_arg was reused
if (i == j) continue;
@@ -4267,7 +4267,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4757,7 +4757,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -4790,7 +4790,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -5069,7 +5069,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const branch_into_prong_relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(branch_into_prong_relocs);
- for (items) |item, idx| {
+ for (items, 0..) |item, idx| {
const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -6373,7 +6373,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
@@ -6438,7 +6438,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_alignment = ty.abiAlignment(self.target.*);
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3c2a81d5d1..b2e23c6278 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -80,7 +80,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add_immediate => try emit.mirAddSubtractImmediate(inst),
@@ -323,7 +323,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -368,7 +368,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
 // If this instruction is contained in the code offset
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 57a8aed699..0fbf1ee984 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -513,7 +513,7 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
- for (self.args) |*arg, arg_index| {
+ for (self.args, 0..) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
@@ -3105,14 +3105,14 @@ fn allocRegs(
var reused_read_arg: ?usize = null;
// Lock all args which are already allocated to registers
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
read_locks[i] = self.register_manager.lockReg(mcv.register);
}
}
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
}
@@ -3120,7 +3120,7 @@ fn allocRegs(
// Allocate registers for all args which aren't allocated to
// registers yet
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
arg.reg.* = mcv.register;
@@ -3141,7 +3141,7 @@ fn allocRegs(
if (arg.bind == .reg) {
arg.reg.* = arg.bind.reg;
} else {
- reuse_operand: for (read_args) |read_arg, i| {
+ reuse_operand: for (read_args, 0..) |read_arg, i| {
if (read_arg.bind == .inst) {
const operand = read_arg.bind.inst;
const mcv = try self.resolveInst(operand);
@@ -3161,7 +3161,7 @@ fn allocRegs(
}
}
} else {
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
arg.reg.* = arg.bind.reg;
} else {
@@ -3173,7 +3173,7 @@ fn allocRegs(
// For all read_args which need to be moved from non-register to
// register, perform the move
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
if (reused_read_arg) |j| {
// Check whether this read_arg was reused
if (i == j) continue;
@@ -4217,7 +4217,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4669,7 +4669,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -4702,7 +4702,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -4991,7 +4991,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const branch_into_prong_relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(branch_into_prong_relocs);
- for (items) |item, idx| {
+ for (items, 0..) |item, idx| {
const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -6296,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiAlignment(self.target.*) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);
@@ -6346,7 +6346,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_alignment = ty.abiAlignment(self.target.*);
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index fe34a28b6e..17540f0968 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -77,7 +77,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add => try emit.mirDataProcessing(inst),
@@ -239,7 +239,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -284,7 +284,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
 // If this instruction is contained in the code offset
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index af7fb301b9..8e76ae9409 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -452,11 +452,11 @@ pub const Instruction = union(enum) {
const masks = comptime blk: {
const base_mask: u32 = std.math.maxInt(u8);
var result = [_]u32{0} ** 16;
- for (result) |*mask, i| mask.* = std.math.rotr(u32, base_mask, 2 * i);
+ for (&result, 0..) |*mask, i| mask.* = std.math.rotr(u32, base_mask, 2 * i);
break :blk result;
};
- return for (masks) |mask, i| {
+ return for (masks, 0..) |mask, i| {
if (x & mask == x) {
break Operand{
.immediate = .{
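
Editor's note, not part of the diff: the mask table above enumerates every 8-bit value rotated right by an even amount, which is the ARM data-processing immediate rule. A standalone sketch of the same predicate (`isEncodableImm` is an illustrative name, not from the source):

```zig
const std = @import("std");

/// True if `x` is encodable as an ARM data-processing immediate:
/// an 8-bit value rotated right by an even amount (0, 2, ..., 30).
/// Mirrors the comptime mask table built in the hunk above.
fn isEncodableImm(x: u32) bool {
    var i: u32 = 0;
    while (i < 16) : (i += 1) {
        const mask = std.math.rotr(u32, @as(u32, std.math.maxInt(u8)), 2 * i);
        if (x & mask == x) return true;
    }
    return false;
}
```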
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 8b8fca4859..b97ac727c1 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1689,7 +1689,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -2727,7 +2727,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var next_stack_offset: u32 = 0;
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 4b2dad4981..387c735896 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -38,7 +38,7 @@ pub fn emitMir(
const mir_tags = emit.mir.instructions.items(.tag);
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add => try emit.mirRType(inst),
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 418c67c580..8344b6e0cc 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1189,7 +1189,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
try self.register_manager.getReg(reg, null);
}
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(arg);
@@ -1450,7 +1450,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -1484,7 +1484,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -4363,7 +4363,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
.callee => abi.c_abi_int_param_regs_callee_view,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index 8500f338ec..7e71492af7 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -69,7 +69,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.dbg_line => try emit.mirDbgLine(inst),
@@ -513,7 +513,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -558,7 +558,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
 // If this instruction is contained in the code offset
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7ce6a0482b..53dc28626c 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1255,7 +1255,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// reserve space and insert all prologue instructions at the front of the instruction list
 // We insert them in reverse order as there is no insertSlice in multiArrayList.
try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len);
- for (prologue.items) |_, index| {
+ for (prologue.items, 0..) |_, index| {
const inst = prologue.items[prologue.items.len - 1 - index];
func.mir_instructions.insertAssumeCapacity(0, inst);
}
@@ -3117,7 +3117,7 @@ fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
const target_values = target_slice.items(.value);
try parent.values.ensureUnusedCapacity(func.gpa, branch.values.count());
- for (target_keys) |key, index| {
+ for (target_keys, 0..) |key, index| {
// TODO: process deaths from branches
parent.values.putAssumeCapacity(key, target_values[index]);
}
@@ -3501,7 +3501,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const values = try func.gpa.alloc(CaseValue, items.len);
errdefer func.gpa.free(values);
- for (items) |ref, i| {
+ for (items, 0..) |ref, i| {
const item_val = func.air.value(ref).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
@@ -3561,7 +3561,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
while (value <= highest) : (value += 1) {
// idx represents the branch we jump to
const idx = blk: {
- for (case_list.items) |case, idx| {
+ for (case_list.items, 0..) |case, idx| {
for (case.values) |case_value| {
if (case_value.integer == value) break :blk @intCast(u32, idx);
}
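
The labeled block here returns a value from inside the nested loop via `break :blk`. A minimal sketch of the construct with hypothetical case data (assumes the `std` import above):

    test "labeled block break from a nested loop" {
        const cases = [_][]const u8{ &.{ 1, 2 }, &.{3} };
        const wanted: u8 = 3;
        const idx = blk: {
            for (cases, 0..) |case, i| {
                for (case) |v| {
                    if (v == wanted) break :blk @intCast(u32, i);
                }
            }
            break :blk 0;
        };
        try std.testing.expectEqual(@as(u32, 1), idx);
    }
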
@@ -3588,7 +3588,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
- for (case_list.items) |case, index| {
+ for (case_list.items, 0..) |case, index| {
// when sparse, we use if/else-chain, so emit conditional checks
if (is_sparse) {
// for single value prong we can emit a simple if
@@ -4558,7 +4558,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
const elem_val = try func.resolveInst(elem);
try func.store(offset, elem_val, elem_ty, 0);
@@ -4587,7 +4587,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// we ensure a new local is created so it's zero-initialized
const result = try func.ensureAllocLocal(backing_type);
var current_bit: u16 = 0;
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -4623,7 +4623,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
const result = try func.allocStack(result_ty);
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
if (result_ty.structFieldValueComptime(elem_index) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index);
@@ -6149,7 +6149,7 @@ fn callIntrinsic(
} else WValue{ .none = {} };
// Lower all arguments to the stack before we call our function
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
try func.lowerArg(.C, param_types[arg_i], arg);
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index a340ac5da8..7d44d3622f 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -44,7 +44,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
// before we emit the function body when lowering MIR
try emit.emitLocals();
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
// block instructions
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index c11ea4e63e..f63d80486e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -186,7 +186,7 @@ const Branch = struct {
_ = options;
comptime assert(unused_format_string.len == 0);
try writer.writeAll("Branch {\n");
- for (ctx.insts) |inst, i| {
+ for (ctx.insts, 0..) |inst, i| {
const mcv = ctx.mcvs[i];
try writer.print(" %{d} => {}\n", .{ inst, mcv });
}
@@ -3951,7 +3951,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
};
defer if (ret_reg_lock) |lock| self.register_manager.unlockReg(lock);
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
const mc_arg = info.args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4912,7 +4912,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
var relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(relocs);
- for (items) |item, item_i| {
+ for (items, 0..) |item, item_i| {
const item_mcv = try self.resolveInst(item);
relocs[item_i] = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
}
@@ -4974,7 +4974,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
for (self.branch_stack.items) |bs| {
log.debug("{}", .{bs.fmtDebug()});
}
- for (branch_stack.items) |bs, i| {
+ for (branch_stack.items, 0..) |bs, i| {
log.debug("Case-{d} branch: {}", .{ i, bs.fmtDebug() });
}
@@ -4999,7 +4999,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
const target_keys = target_slice.items(.key);
const target_values = target_slice.items(.value);
- for (target_keys) |target_key, target_idx| {
+ for (target_keys, 0..) |target_key, target_idx| {
const target_value = target_values[target_idx];
const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -5032,7 +5032,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
const canon_slice = canon_branch.inst_table.entries.slice();
const canon_keys = canon_slice.items(.key);
const canon_values = canon_slice.items(.value);
- for (canon_keys) |canon_key, canon_idx| {
+ for (canon_keys, 0..) |canon_key, canon_idx| {
const canon_value = canon_values[canon_idx];
// We already deleted the items from this table that matched the target_branch.
// So these are all instructions that are only overridden in the canon branch.
@@ -6571,7 +6571,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
switch (result_ty.zigTypeTag()) {
.Struct => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
- for (elements) |elem, elem_i| {
+ for (elements, 0..) |elem, elem_i| {
if (result_ty.structFieldValueComptime(elem_i) != null) continue; // comptime elem
const elem_ty = result_ty.structFieldType(elem_i);
@@ -6586,7 +6586,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elem_ty = result_ty.childType();
const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- for (elements) |elem, elem_i| {
+ for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
const elem_off = @intCast(i32, elem_size * elem_i);
try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{});
@@ -6963,7 +6963,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
assert(ty.hasRuntimeBits());
const classes: []const abi.Class = switch (self.target.os.tag) {
@@ -7039,7 +7039,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (!ty.hasRuntimeBits()) {
result.args[i] = .{ .none = {} };
continue;
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index c4f9b4eb42..12c19915c6 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -61,7 +61,7 @@ const Reloc = struct {
pub fn lowerMir(emit: *Emit) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
try emit.code_offset_mapping.putNoClobber(emit.bin_file.allocator, inst, emit.code.items.len);
switch (tag) {
@@ -1544,7 +1544,7 @@ const OpCode = struct {
fn init(comptime in_bytes: []const u8) OpCode {
comptime assert(in_bytes.len <= 3);
comptime var bytes: [3]u8 = undefined;
- inline for (in_bytes) |x, i| {
+ inline for (in_bytes, 0..) |x, i| {
bytes[i] = x;
}
return .{ .bytes = bytes, .count = in_bytes.len };
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index df2052ca6e..112d9a5982 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -535,7 +535,7 @@ pub const RegisterList = struct {
const Self = @This();
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
- for (registers) |cpreg, i| {
+ for (registers, 0..) |cpreg, i| {
if (reg.id() == cpreg.id()) return @intCast(u32, i);
}
unreachable; // register not in input register list!
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 35ac3dcb55..193efa6dc4 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -335,7 +335,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
- for (result) |item, i| switch (item) {
+ for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
@@ -347,7 +347,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
- for (result) |*item, i| {
+ for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
@@ -379,7 +379,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
}
// Combine this field with the previous one.
const field_class = classifySystemV(field.ty, target, .other);
- for (result) |*result_item, i| {
+ for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
if (result_item.* == field_item) {
@@ -431,7 +431,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
- for (result) |item, i| switch (item) {
+ for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
@@ -443,7 +443,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
- for (result) |*item, i| {
+ for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
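
Note the second pattern in this file: loops that mutate elements through a `|*item|` capture now take `&result` rather than `result`, because a pointer capture needs an addressable pointer-to-array operand. A minimal sketch (illustrative values; assumes the `std` import above):

    test "pointer capture requires iterating over &array" {
        var result = [_]u8{ 1, 2, 3 };
        // `&result` is a `*[3]u8`; `|*item|` then yields mutable element pointers.
        for (&result, 0..) |item, i| {
            item.* += @intCast(u8, i);
        }
        try std.testing.expectEqualSlices(u8, &[_]u8{ 1, 3, 5 }, &result);
    }
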
diff --git a/src/codegen.zig b/src/codegen.zig
index c0a04765b0..9eea1c667d 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -511,7 +511,7 @@ pub fn generateSymbol(
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = fields[index].ty;
// pointer may point to a decl which must be marked used
    // but can also result in a relocation. Therefore we handle those separately.
@@ -537,7 +537,7 @@ pub fn generateSymbol(
const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.aggregate).?.data;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 2f721e1b4b..0beb00b236 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -253,7 +253,7 @@ fn formatIdent(
if (solo and isReservedIdent(ident)) {
try writer.writeAll("zig_e_");
}
- for (ident) |c, i| {
+ for (ident, 0..) |c, i| {
switch (c) {
'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
'.' => try writer.writeByte('_'),
@@ -361,7 +361,7 @@ pub const Function = struct {
_ = mutability;
if (f.getFreeLocals().getPtrContext(ty, f.tyHashCtx())) |locals_list| {
- for (locals_list.items) |local_index, i| {
+ for (locals_list.items, 0..) |local_index, i| {
const local = &f.locals.items[local_index];
if (local.alignment >= alignment) {
local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1);
@@ -1283,7 +1283,7 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
- for (field_vals) |field_val, field_index| {
+ for (field_vals, 0..) |field_val, field_index| {
const field_ty = ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBits()) continue;
@@ -1309,7 +1309,7 @@ pub const DeclGen = struct {
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
var eff_num_fields: usize = 0;
- for (field_vals) |_, index| {
+ for (field_vals, 0..) |_, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1331,7 +1331,7 @@ pub const DeclGen = struct {
var eff_index: usize = 0;
var needs_closing_paren = false;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1359,7 +1359,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1719,7 +1719,7 @@ pub const DeclGen = struct {
{
const fields = t.tupleFields();
var field_id: usize = 0;
- for (fields.types) |field_ty, i| {
+ for (fields.types, 0..) |field_ty, i| {
if (!field_ty.hasRuntimeBits() or fields.values[i].tag() != .unreachable_value) continue;
try buffer.append(' ');
@@ -2130,7 +2130,7 @@ pub const DeclGen = struct {
try tuple_storage.ensureTotalCapacity(allocator, t.structFieldCount());
const fields = t.tupleFields();
- for (fields.values) |value, index|
+ for (fields.values, 0..) |value, index|
if (value.tag() == .unreachable_value)
tuple_storage.appendAssumeCapacity(.{
.type = fields.types[index],
@@ -2415,7 +2415,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len - "(".len;
try dg.renderTypeAndName(bw, enum_ty, .{ .identifier = "tag" }, .Const, 0, .Complete);
try buffer.appendSlice(") {\n switch (tag) {\n");
- for (enum_ty.enumFields().keys()) |name, index| {
+ for (enum_ty.enumFields().keys(), 0..) |name, index| {
const name_z = try dg.typedefs.allocator.dupeZ(u8, name);
defer dg.typedefs.allocator.free(name_z);
const name_bytes = name_z[0 .. name_z.len + 1];
@@ -2681,7 +2681,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
var max_name_len: usize = 0;
- for (o.dg.module.error_name_list.items) |name, value| {
+ for (o.dg.module.error_name_list.items, 0..) |name, value| {
max_name_len = std.math.max(name.len, max_name_len);
var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
@@ -2724,7 +2724,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .Const, 0, .Complete);
try writer.writeAll(" = {");
- for (o.dg.module.error_name_list.items) |name, value| {
+ for (o.dg.module.error_name_list.items, 0..) |name, value| {
if (value != 0) try writer.writeByte(',');
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
@@ -2742,7 +2742,7 @@ fn genExports(o: *Object) !void {
defer tracy.end();
const fwd_decl_writer = o.dg.fwd_decl.writer();
- if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..]) |@"export", i| {
+ if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..], 0..) |@"export", i| {
try fwd_decl_writer.writeAll("zig_export(");
try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, 1 + i));
try fwd_decl_writer.print(", {s}, {s});\n", .{
@@ -2800,7 +2800,7 @@ pub fn genFunc(f: *Function) !void {
// alignment, descending.
const free_locals = f.getFreeLocals();
const values = f.allocs.values();
- for (f.allocs.keys()) |local_index, i| {
+ for (f.allocs.keys(), 0..) |local_index, i| {
if (values[i]) continue; // static
const local = f.locals.items[local_index];
log.debug("inserting local {d} into free_locals", .{local_index});
@@ -4238,7 +4238,7 @@ fn airCall(
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
resolved_args[i] = try f.resolveInst(arg);
}
@@ -4303,7 +4303,7 @@ fn airCall(
try writer.writeByte('(');
var args_written: usize = 0;
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
const ty = f.air.typeOf(arg);
if (!ty.hasRuntimeBitsIgnoreComptime()) continue;
if (args_written != 0) {
@@ -5043,7 +5043,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
extra_i = constraints_extra_begin;
var locals_index = locals_begin;
try writer.writeByte(':');
- for (outputs) |output, index| {
+ for (outputs, 0..) |output, index| {
const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5067,7 +5067,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
}
try writer.writeByte(':');
- for (inputs) |input, index| {
+ for (inputs, 0..) |input, index| {
const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5426,7 +5426,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
};
const field_loc = switch (struct_ty.tag()) {
.@"struct" => switch (struct_ty.containerLayout()) {
- .Auto, .Extern => for (struct_ty.structFields().values()[index..]) |field, offset| {
+ .Auto, .Extern => for (struct_ty.structFields().values()[index..], 0..) |field, offset| {
if (field.ty.hasRuntimeBitsIgnoreComptime()) break FieldLoc{ .field = .{
.identifier = struct_ty.structFieldName(index + offset),
} };
@@ -5469,7 +5469,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
if (tuple.values[index].tag() != .unreachable_value) return CValue.none;
var id: usize = 0;
- break :field_name for (tuple.values) |value, i| {
+ break :field_name for (tuple.values, 0..) |value, i| {
if (value.tag() != .unreachable_value) continue;
if (!tuple.types[i].hasRuntimeBitsIgnoreComptime()) continue;
if (i >= index) break FieldLoc{ .field = .{ .field = id } };
@@ -6687,7 +6687,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
defer gpa.free(resolved_elements);
- for (elements) |element, i| {
+ for (elements, 0..) |element, i| {
resolved_elements[i] = try f.resolveInst(element);
}
{
@@ -6706,7 +6706,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
switch (inst_ty.zigTypeTag()) {
.Array, .Vector => {
const elem_ty = inst_ty.childType();
- for (resolved_elements) |element, i| {
+ for (resolved_elements, 0..) |element, i| {
try f.writeCValue(writer, local, .Other);
try writer.print("[{d}] = ", .{i});
try f.writeCValue(writer, element, .Other);
@@ -6727,7 +6727,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(")");
try writer.writeByte('{');
var empty = true;
- for (elements) |element, index| {
+ for (elements, 0..) |element, index| {
if (inst_ty.structFieldValueComptime(index)) |_| continue;
if (!empty) try writer.writeAll(", ");
@@ -6746,7 +6746,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("};\n");
var field_id: usize = 0;
- for (elements) |element, index| {
+ for (elements, 0..) |element, index| {
if (inst_ty.structFieldValueComptime(index)) |_| continue;
const element_ty = f.air.typeOf(element);
@@ -6784,7 +6784,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
var empty = true;
- for (elements) |_, index| {
+ for (elements, 0..) |_, index| {
const field_ty = inst_ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -6796,7 +6796,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
empty = false;
}
empty = true;
- for (resolved_elements) |element, index| {
+ for (resolved_elements, 0..) |element, index| {
const field_ty = inst_ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -7608,7 +7608,7 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
}
fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void {
- for (f.locals.items[pre_locals_len..]) |*local, local_offset| {
+ for (f.locals.items[pre_locals_len..], 0..) |*local, local_offset| {
const local_index = pre_locals_len + @intCast(LocalIndex, local_offset);
if (f.allocs.contains(local_index)) continue; // allocs are not freeable
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 00f6c55171..21c57b5e13 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -605,7 +605,7 @@ pub const Object = struct {
defer mod.gpa.free(llvm_errors);
llvm_errors[0] = llvm_slice_ty.getUndef();
- for (llvm_errors[1..]) |*llvm_error, i| {
+ for (llvm_errors[1..], 0..) |*llvm_error, i| {
const name = error_name_list[1..][i];
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
@@ -696,7 +696,7 @@ pub const Object = struct {
object.extern_collisions.clearRetainingCapacity();
const export_keys = mod.decl_exports.keys();
- for (mod.decl_exports.values()) |export_list, i| {
+ for (mod.decl_exports.values(), 0..) |export_list, i| {
const decl_index = export_keys[i];
const llvm_global = object.decl_map.get(decl_index) orelse continue;
for (export_list.items) |exp| {
@@ -1081,7 +1081,7 @@ pub const Object = struct {
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
- for (field_types) |_, field_i_usize| {
+ for (field_types, 0..) |_, field_i_usize| {
const field_i = @intCast(c_uint, field_i_usize);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1500,7 +1500,7 @@ pub const Object = struct {
const int_info = ty.intInfo(target);
assert(int_info.bits != 0);
- for (field_names) |field_name, i| {
+ for (field_names, 0..) |field_name, i| {
const field_name_z = try gpa.dupeZ(u8, field_name);
defer gpa.free(field_name_z);
@@ -1997,7 +1997,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -2926,7 +2926,7 @@ pub const DeclGen = struct {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -3437,7 +3437,7 @@ pub const DeclGen = struct {
const llvm_elems = try gpa.alloc(*llvm.Value, len);
defer gpa.free(llvm_elems);
var need_unnamed = false;
- for (elem_vals[0..len]) |elem_val, i| {
+ for (elem_vals[0..len], 0..) |elem_val, i| {
llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
}
@@ -3623,7 +3623,7 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var need_unnamed = false;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3685,7 +3685,7 @@ pub const DeclGen = struct {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
- for (field_vals) |field_val, i| {
+ for (field_vals, 0..) |field_val, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3860,7 +3860,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
@@ -3885,7 +3885,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
}
return llvm.constVector(
@@ -3918,7 +3918,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
@@ -4484,7 +4484,7 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const air_tags = self.air.instructions.items(.tag);
- for (body) |inst, i| {
+ for (body, 0..) |inst, i| {
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
@@ -4857,7 +4857,7 @@ pub const FuncGen = struct {
const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
- for (llvm_types) |field_ty, i_usize| {
+ for (llvm_types, 0..) |field_ty, i_usize| {
const i = @intCast(c_uint, i_usize);
const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
@@ -6255,7 +6255,7 @@ pub const FuncGen = struct {
var name_map: std.StringArrayHashMapUnmanaged(u16) = .{};
try name_map.ensureUnusedCapacity(arena, max_param_count);
- for (outputs) |output, i| {
+ for (outputs, 0..) |output, i| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -6440,7 +6440,7 @@ pub const FuncGen = struct {
var name_start: usize = undefined;
var modifier_start: usize = undefined;
- for (asm_source) |byte, i| {
+ for (asm_source, 0..) |byte, i| {
switch (state) {
.start => switch (byte) {
'%' => state = .percent,
@@ -6531,7 +6531,7 @@ pub const FuncGen = struct {
.Auto,
"",
);
- for (llvm_param_attrs[0..param_count]) |llvm_elem_ty, i| {
+ for (llvm_param_attrs[0..param_count], 0..) |llvm_elem_ty, i| {
if (llvm_elem_ty) |llvm_ty| {
llvm.setCallElemTypeAttr(call, i, llvm_ty);
}
@@ -6539,7 +6539,7 @@ pub const FuncGen = struct {
var ret_val = call;
llvm_ret_i = 0;
- for (outputs) |output, i| {
+ for (outputs, 0..) |output, i| {
if (llvm_ret_indirect[i]) continue;
const output_value = if (return_count > 1) b: {
@@ -7421,7 +7421,7 @@ pub const FuncGen = struct {
const index_i32 = llvm_i32.constInt(i, .False);
var args: [3]*llvm.Value = undefined;
- for (args_vectors) |arg_vector, k| {
+ for (args_vectors, 0..) |arg_vector, k| {
args[k] = self.builder.buildExtractElement(arg_vector, index_i32, "");
}
const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, "");
@@ -8790,7 +8790,7 @@ pub const FuncGen = struct {
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
- for (fields.keys()) |_, field_index| {
+ for (fields.keys(), 0..) |_, field_index| {
const this_tag_int_value = int: {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
@@ -8879,7 +8879,7 @@ pub const FuncGen = struct {
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
- for (fields.keys()) |name, field_index| {
+ for (fields.keys(), 0..) |name, field_index| {
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_init_llvm_ty = str_init.typeOf();
const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -9003,7 +9003,7 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
- for (values) |*val, i| {
+ for (values, 0..) |*val, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
if (elem.isUndef()) {
@@ -9185,7 +9185,7 @@ pub const FuncGen = struct {
const llvm_u32 = self.context.intType(32);
var vector = llvm_result_ty.getUndef();
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const index_u32 = llvm_u32.constInt(i, .False);
const llvm_elem = try self.resolveInst(elem);
vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
@@ -9202,7 +9202,7 @@ pub const FuncGen = struct {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -9234,7 +9234,7 @@ pub const FuncGen = struct {
const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
@@ -9255,7 +9255,7 @@ pub const FuncGen = struct {
return alloca_inst;
} else {
var result = llvm_result_ty.getUndef();
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
@@ -9280,7 +9280,7 @@ pub const FuncGen = struct {
};
const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base);
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
llvm_usize.constInt(@intCast(c_uint, i), .False),
@@ -9919,7 +9919,7 @@ pub const FuncGen = struct {
};
const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
const zero = usize_llvm_ty.constInt(0, .False);
- for (array_elements) |elem, i| {
+ for (array_elements, 0..) |elem, i| {
const indexes = [_]*llvm.Value{
zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
};
@@ -10341,7 +10341,7 @@ fn llvmFieldIndex(
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var llvm_field_index: c_uint = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
const field_align = field_ty.abiAlignment(target);
@@ -10952,7 +10952,7 @@ fn isByRef(ty: Type) bool {
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var count: usize = 0;
- for (tuple.values) |field_val, i| {
+ for (tuple.values, 0..) |field_val, i| {
if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;
count += 1;
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index c5a3d57d07..5f27c14e95 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -418,7 +418,7 @@ pub const DeclGen = struct {
const elem_refs = try self.gpa.alloc(IdRef, vector_len);
defer self.gpa.free(elem_refs);
- for (elem_refs) |*elem, i| {
+ for (elem_refs, 0..) |*elem, i| {
elem.* = try self.genConstant(elem_ty, elem_vals[i]);
}
try section.emit(self.spv.gpa, .OpConstantComposite, .{
@@ -498,7 +498,7 @@ pub const DeclGen = struct {
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
- for (param_types) |*param, i| {
+ for (param_types, 0..) |*param, i| {
param.* = try self.resolveType(ty.fnParamType(i));
}
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index fc4ab406b9..6e77818fa5 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -392,7 +392,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
.OpTypeFunction => blk: {
const param_operands = operands[2..];
const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
- for (param_types) |*param, i| {
+ for (param_types, 0..) |*param, i| {
param.* = try self.resolveTypeRef(param_operands[i].ref_id);
}
const payload = try self.spv.arena.create(SpvType.Payload.Function);
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index f37b04bff3..3562e87be4 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -161,7 +161,7 @@ pub fn flush(self: Module, file: std.fs.File) !void {
var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
var file_size: u64 = 0;
- for (iovc_buffers) |*iovc, i| {
+ for (&iovc_buffers, 0..) |*iovc, i| {
        // Note: since SPIR-V supports both little and big endian, we can ignore byte order here and
// just treat the words as a sequence of bytes.
const bytes = std.mem.sliceAsBytes(buffers[i]);
@@ -389,7 +389,7 @@ fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct
// Decorations for the struct members.
const extra = info.member_decoration_extra;
var extra_i: u32 = 0;
- for (info.members) |member, i| {
+ for (info.members, 0..) |member, i| {
const d = member.decorations;
const index = @intCast(Word, i);
switch (d.matrix_layout) {
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index 83f594dcef..a76314f5fa 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -195,7 +195,7 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe
fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
var mask: Word = 0;
- inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
+ inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
switch (@typeInfo(field.type)) {
.Optional => if (@field(operand, field.name) != null) {
mask |= 1 << @intCast(u5, bit);
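
The counter operand works with `inline for` over comptime-known struct fields as well, as in the mask-building loop above. A minimal sketch assuming a hypothetical all-bool flags struct (and the `std` import above):

    test "inline for over struct fields with a counter" {
        const Flags = struct { a: bool, b: bool, c: bool };
        const operand = Flags{ .a = true, .b = false, .c = true };
        var mask: u32 = 0;
        inline for (@typeInfo(Flags).Struct.fields, 0..) |field, bit| {
            // `bit` is comptime-known here, so the shift amount is too.
            if (@field(operand, field.name)) mask |= 1 << @intCast(u5, bit);
        }
        try std.testing.expectEqual(@as(u32, 0b101), mask);
    }
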
diff --git a/src/codegen/spirv/type.zig b/src/codegen/spirv/type.zig
index 6cc1b8f3bd..dc993b62ff 100644
--- a/src/codegen/spirv/type.zig
+++ b/src/codegen/spirv/type.zig
@@ -98,7 +98,7 @@ pub const Type = extern union {
const struct_b = b.payload(.@"struct");
if (struct_a.members.len != struct_b.members.len)
return false;
- for (struct_a.members) |mem_a, i| {
+ for (struct_a.members, 0..) |mem_a, i| {
if (!std.meta.eql(mem_a, struct_b.members[i]))
return false;
}
diff --git a/src/glibc.zig b/src/glibc.zig
index 2a2887c334..3021e7c7ba 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -698,7 +698,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
const metadata = try loadMetaData(comp.gpa, abilists_contents);
defer metadata.destroy(comp.gpa);
- const target_targ_index = for (metadata.all_targets) |targ, i| {
+ const target_targ_index = for (metadata.all_targets, 0..) |targ, i| {
if (targ.arch == target.cpu.arch and
targ.os == target.os.tag and
targ.abi == target.abi)
@@ -709,7 +709,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
unreachable; // target_util.available_libcs prevents us from getting here
};
- const target_ver_index = for (metadata.all_versions) |ver, i| {
+ const target_ver_index = for (metadata.all_versions, 0..) |ver, i| {
switch (ver.order(target_version)) {
.eq => break i,
.lt => continue,
@@ -743,7 +743,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
var stubs_asm = std.ArrayList(u8).init(comp.gpa);
defer stubs_asm.deinit();
- for (libs) |lib, lib_i| {
+ for (libs, 0..) |lib, lib_i| {
stubs_asm.shrinkRetainingCapacity(0);
try stubs_asm.appendSlice(".text\n");
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index 0a50f97012..da877e1291 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -66,7 +66,7 @@ pub const LibCInstallation = struct {
var line_it = std.mem.split(u8, line, "=");
const name = line_it.first();
const value = line_it.rest();
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
if (std.mem.eql(u8, name, field.name)) {
found_keys[i].found = true;
if (value.len == 0) {
@@ -79,7 +79,7 @@ pub const LibCInstallation = struct {
}
}
}
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
if (!found_keys[i].found) {
log.err("missing field: {s}\n", .{field.name});
return error.ParseError;
@@ -640,7 +640,7 @@ fn printVerboseInvocation(
} else {
std.debug.print("Zig attempted to find the path to native system libc headers by executing this command:\n", .{});
}
- for (argv) |arg, i| {
+ for (argv, 0..) |arg, i| {
if (i != 0) std.debug.print(" ", .{});
std.debug.print("{s}", .{arg});
}
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 56113d1355..a20b5e81f7 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -34,7 +34,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
.basename = basename,
};
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
- for (unwind_src_list) |unwind_src, i| {
+ for (unwind_src_list, 0..) |unwind_src, i| {
var cflags = std.ArrayList([]const u8).init(arena);
switch (Compilation.classifyFileExt(unwind_src)) {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 2922e783e1..c0ac7e0b88 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -486,7 +486,7 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
- for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
+ for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
@@ -2191,7 +2191,7 @@ fn logSymtab(self: *Coff) void {
log.debug("symtab:", .{});
log.debug(" object(null)", .{});
- for (self.locals.items) |*sym, sym_id| {
+ for (self.locals.items, 0..) |*sym, sym_id| {
const where = if (sym.section_number == .UNDEFINED) "ord" else "sect";
const def_index: u16 = switch (sym.section_number) {
.UNDEFINED => 0, // TODO
@@ -2216,7 +2216,7 @@ fn logSymtab(self: *Coff) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const got_sym = self.getSymbol(.{ .sym_index = entry.sym_index, .file = null });
const target_sym = self.getSymbol(entry.target);
if (target_sym.section_number == .UNDEFINED) {
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index a3d0aa8a53..3fb6de7b73 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -339,7 +339,7 @@ pub const DeclState = struct {
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
const fields = ty.tupleFields();
- for (fields.types) |field, field_index| {
+ for (fields.types, 0..) |field, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -367,7 +367,7 @@ pub const DeclState = struct {
}
const fields = ty.structFields();
- for (fields.keys()) |field_name, field_index| {
+ for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?;
if (!field.ty.hasRuntimeBits()) continue;
// DW.AT.member
@@ -409,7 +409,7 @@ pub const DeclState = struct {
.enum_numbered => ty.castTag(.enum_numbered).?.data.values,
else => unreachable,
};
- for (fields.keys()) |field_name, field_i| {
+ for (fields.keys(), 0..) |field_name, field_i| {
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@@ -2252,14 +2252,14 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
1, // `DW.LNS.set_isa`
});
- for (paths.dirs) |dir, i| {
+ for (paths.dirs, 0..) |dir, i| {
log.debug("adding new include dir at {d} of '{s}'", .{ i + 1, dir });
di_buf.appendSliceAssumeCapacity(dir);
di_buf.appendAssumeCapacity(0);
}
di_buf.appendAssumeCapacity(0); // include directories sentinel
- for (paths.files) |file, i| {
+ for (paths.files, 0..) |file, i| {
const dir_index = paths.files_dirs_indexes[i];
log.debug("adding new file name at {d} of '{s}' referencing directory {d}", .{ i + 1, file, dir_index + 1 });
di_buf.appendSliceAssumeCapacity(file);
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index d936a347cf..1a9d594c56 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1126,7 +1126,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
defer gpa.free(buf);
- for (buf) |*phdr, i| {
+ for (buf, 0..) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
@@ -1138,7 +1138,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
defer gpa.free(buf);
- for (buf) |*phdr, i| {
+ for (buf, 0..) |*phdr, i| {
phdr.* = self.program_headers.items[i];
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
@@ -1193,7 +1193,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
defer gpa.free(buf);
- for (buf) |*shdr, i| {
+ for (buf, 0..) |*shdr, i| {
shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
@@ -1207,7 +1207,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
defer gpa.free(buf);
- for (buf) |*shdr, i| {
+ for (buf, 0..) |*shdr, i| {
shdr.* = slice.items(.shdr)[i];
log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
@@ -1732,7 +1732,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
argv.appendAssumeCapacity("--as-needed");
var as_needed = true;
- for (system_libs) |link_lib, i| {
+ for (system_libs, 0..) |link_lib, i| {
const lib_as_needed = !system_libs_values[i].needed;
switch ((@as(u2, @boolToInt(lib_as_needed)) << 1) | @boolToInt(as_needed)) {
0b00, 0b11 => {},
@@ -2909,7 +2909,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const buf = try self.base.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
defer self.base.allocator.free(buf);
- for (buf) |*sym, i| {
+ for (buf, 0..) |*sym, i| {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
@@ -2929,7 +2929,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const buf = try self.base.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
defer self.base.allocator.free(buf);
- for (buf) |*sym, i| {
+ for (buf, 0..) |*sym, i| {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
@@ -3238,11 +3238,11 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
- for (self.local_symbols.items) |sym, id| {
+ for (self.local_symbols.items, 0..) |sym, id| {
log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
- for (self.global_symbols.items) |sym, id| {
+ for (self.global_symbols.items, 0..) |sym, id| {
log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 35f5f1b562..7c1d4776af 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -962,7 +962,7 @@ pub fn parseLibs(
syslibroot: ?[]const u8,
dependent_libs: anytype,
) !void {
- for (lib_names) |lib, i| {
+ for (lib_names, 0..) |lib, i| {
const lib_info = lib_infos[i];
log.debug("parsing lib path '{s}'", .{lib});
if (try self.parseDylib(lib, dependent_libs, .{
@@ -1584,7 +1584,7 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1686,7 +1686,7 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
gop.value_ptr.* = sym_loc;
const global = gop.value_ptr.*;
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -2852,7 +2852,7 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
});
// TODO: enforce order by increasing VM addresses in self.sections container.
- for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
+ for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
@@ -3082,7 +3082,7 @@ pub fn initSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
fn insertSection(self: *MachO, segment_index: u8, header: macho.section_64) !u8 {
const precedence = getSectionPrecedence(header);
const indexes = self.getSectionIndexes(segment_index);
- const insertion_index = for (self.sections.items(.header)[indexes.start..indexes.end]) |hdr, i| {
+ const insertion_index = for (self.sections.items(.header)[indexes.start..indexes.end], 0..) |hdr, i| {
if (getSectionPrecedence(hdr) > precedence) break @intCast(u8, i + indexes.start);
} else indexes.end;
log.debug("inserting section '{s},{s}' at index {d}", .{
@@ -3133,7 +3133,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 {
}
fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
@@ -3147,7 +3147,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
seg.filesize = 0;
seg.vmsize = 0;
- for (self.segments.items) |segment, id| {
+ for (self.segments.items, 0..) |segment, id| {
if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size);
@@ -3167,7 +3167,7 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom_index, i| {
+ for (self.rebases.keys(), 0..) |atom_index, i| {
const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
@@ -3197,7 +3197,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom_index, i| {
+ for (raw_bindings.keys(), 0..) |atom_index, i| {
const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
@@ -3417,7 +3417,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
@@ -3736,7 +3736,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
}
fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
} else return null;
}
@@ -3758,7 +3758,7 @@ pub fn getLinkeditSegmentPtr(self: *MachO) *macho.segment_command_64 {
pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @intCast(u8, i);
} else return null;
@@ -3766,7 +3766,7 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8)
pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
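
The `for ... else` expression above breaks with the matching segment's section count and falls back to the `else` value when no index matches. A minimal sketch with hypothetical counts (assumes the `std` import above):

    test "for-else with a break value" {
        const seg_nsects = [_]u8{ 4, 2, 7 };
        const segment_index: usize = 1;
        var start: u8 = 0;
        const nsects = for (seg_nsects, 0..) |count, i| {
            if (i == segment_index) break count;
            start += count;
        } else 0;
        try std.testing.expectEqual(@as(u8, 2), nsects);
        try std.testing.expectEqual(@as(u8, 4), start);
    }
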
@@ -4160,7 +4160,7 @@ pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, p
pub fn logSections(self: *MachO) void {
log.debug("sections:", .{});
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x}, sizeof({x})", .{
i + 1,
header.segName(),
@@ -4197,7 +4197,7 @@ pub fn logSymtab(self: *MachO) void {
var buf: [4]u8 = undefined;
log.debug("symtab:", .{});
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
const where = if (sym.undf() and !sym.tentative()) "ord" else "sect";
const def_index = if (sym.undf() and !sym.tentative())
@divTrunc(sym.n_desc, macho.N_SYMBOL_RESOLVER)
@@ -4220,7 +4220,7 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const atom_sym = entry.getSymbol(self);
const target_sym = self.getSymbol(entry.target);
if (target_sym.undf()) {
@@ -4241,7 +4241,7 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("stubs entries:", .{});
- for (self.stubs.items) |entry, i| {
+ for (self.stubs.items, 0..) |entry, i| {
const target_sym = self.getSymbol(entry.target);
const atom_sym = entry.getSymbol(self);
assert(target_sym.undf());
@@ -4257,7 +4257,7 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ for (slice.items(.last_atom_index), 0..) |last_atom_index, i| {
var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 0a5c8b0372..1f41fc1cb8 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -383,7 +383,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) !void {
// Write segment/section headers from the binary file first.
const end = macho_file.linkedit_segment_cmd_index.?;
- for (macho_file.segments.items[0..end]) |seg, i| {
+ for (macho_file.segments.items[0..end], 0..) |seg, i| {
const indexes = macho_file.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
out_seg.fileoff = 0;
@@ -412,7 +412,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
}
}
    // Next, commit the dSYM's __LINKEDIT and __DWARF segment headers.
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items[indexes.start..indexes.end]) |header| {
@@ -477,7 +477,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
- for (macho_file.locals.items) |sym, sym_id| {
+ for (macho_file.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
@@ -547,7 +547,7 @@ fn writeStrtab(self: *DebugSymbols) !void {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index a2c4bad942..863f1e805a 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -347,7 +347,7 @@ pub fn parseFromStub(
});
defer matcher.deinit();
- for (lib_stub.inner) |elem, stub_index| {
+ for (lib_stub.inner, 0..) |elem, stub_index| {
const is_match = switch (elem) {
.v3 => |stub| matcher.matchesArch(stub.archs),
.v4 => |stub| matcher.matchesTarget(stub.targets),
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 4d24b2ed6a..fdcdb47224 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -201,7 +201,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(allocator, self.in_symtab.?.len);
defer sorted_all_syms.deinit();
- for (self.in_symtab.?) |_, index| {
+ for (self.in_symtab.?, 0..) |_, index| {
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
}
@@ -211,7 +211,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
// is kind enough to specify the symbols in the correct order.
sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
- for (sorted_all_syms.items) |sym_id, i| {
+ for (sorted_all_syms.items, 0..) |sym_id, i| {
const sym = sym_id.getSymbol(self);
if (sym.sect() and self.source_section_index_lookup[sym.n_sect - 1] == -1) {
@@ -380,7 +380,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
const gpa = zld.gpa;
const sections = self.getSourceSections();
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse {
log.debug(" unhandled section '{s},{s}'", .{ sect.segName(), sect.sectName() });
@@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
}
if (self.in_symtab == null) {
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
if (sect.size == 0) continue;
@@ -446,7 +446,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
var sorted_sections = try gpa.alloc(SortedSection, sections.len);
defer gpa.free(sorted_sections);
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
}
@@ -804,7 +804,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
try self.parseRelocs(gpa, sect_id);
const relocs = self.getRelocs(sect_id);
- for (unwind_records) |record, record_id| {
+ for (unwind_records, 0..) |record, record_id| {
const offset = record_id * @sizeOf(macho.compact_unwind_entry);
const rel_pos = filterRelocs(
relocs,
@@ -857,7 +857,7 @@ pub fn getSourceSectionByName(self: Object, segname: []const u8, sectname: []con
pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname: []const u8) ?u8 {
const sections = self.getSourceSections();
- for (sections) |sect, i| {
+ for (sections, 0..) |sect, i| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
return @intCast(u8, i);
} else return null;
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index 5e61834bbc..c64e617a35 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -126,7 +126,7 @@ const Page = struct {
ctx.page.start + ctx.page.count,
});
try writer.print(" encodings (count = {d})\n", .{ctx.page.page_encodings_count});
- for (ctx.page.page_encodings[0..ctx.page.page_encodings_count]) |record_id, i| {
+ for (ctx.page.page_encodings[0..ctx.page.page_encodings_count], 0..) |record_id, i| {
const record = ctx.info.records.items[record_id];
const enc = record.compactUnwindEncoding;
try writer.print(" {d}: 0x{x:0>8}\n", .{ ctx.info.common_encodings_count + i, enc });
@@ -205,7 +205,7 @@ pub fn scanRelocs(zld: *Zld) !void {
if (zld.getSectionByName("__TEXT", "__unwind_info") == null) return;
const cpu_arch = zld.options.target.cpu.arch;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
const unwind_records = object.getUnwindRecords();
for (object.exec_atoms.items) |atom_index| {
const record_id = object.unwind_records_lookup.get(atom_index) orelse continue;
@@ -244,7 +244,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
defer atom_indexes.deinit();
// TODO handle dead stripping
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
log.debug("collecting unwind records in {s} ({d})", .{ object.name, object_id });
const unwind_records = object.getUnwindRecords();
@@ -335,7 +335,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
var maybe_prev: ?macho.compact_unwind_entry = null;
- for (records.items) |record, i| {
+ for (records.items, 0..) |record, i| {
const record_id = blk: {
if (maybe_prev) |prev| {
const is_dwarf = UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
@@ -483,7 +483,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
// Save indices of records requiring LSDA relocation
try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
- for (info.records.items) |rec, i| {
+ for (info.records.items, 0..) |rec, i| {
info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
if (rec.lsda == 0) continue;
try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
@@ -556,7 +556,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const cpu_arch = zld.options.target.cpu.arch;
log.debug("Personalities:", .{});
- for (info.personalities[0..info.personalities_count]) |target, i| {
+ for (info.personalities[0..info.personalities_count], 0..) |target, i| {
const atom_index = zld.getGotAtomIndexForSymbol(target).?;
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
@@ -581,7 +581,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
}
}
- for (info.records.items) |record, i| {
+ for (info.records.items, 0..) |record, i| {
log.debug("Unwind record at offset 0x{x}", .{i * @sizeOf(macho.compact_unwind_entry)});
log.debug(" start: 0x{x}", .{record.rangeStart});
log.debug(" length: 0x{x}", .{record.rangeLength});
@@ -621,7 +621,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
const lsda_base_offset = @intCast(u32, pages_base_offset -
(info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
- for (info.pages.items) |page, i| {
+ for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const first_entry = info.records.items[page.start];
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig
index 6ba70acbfd..9dfd6226b4 100644
--- a/src/link/MachO/dead_strip.zig
+++ b/src/link/MachO/dead_strip.zig
@@ -238,7 +238,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
}
}
- for (zld.objects.items) |_, object_id| {
+ for (zld.objects.items, 0..) |_, object_id| {
// Traverse unwind and eh_frame records, noting whether the source symbol has been
// marked and, if so, marking all of its references as live.
try markUnwindRecords(zld, @intCast(u32, object_id), alive);
diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig
index 0233744c43..1d7a0c94c0 100644
--- a/src/link/MachO/dyld_info/Rebase.zig
+++ b/src/link/MachO/dyld_info/Rebase.zig
@@ -45,7 +45,7 @@ pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
var start: usize = 0;
var seg_id: ?u8 = null;
- for (rebase.entries.items) |entry, i| {
+ for (rebase.entries.items, 0..) |entry, i| {
if (seg_id != null and seg_id.? == entry.segment_id) continue;
try finalizeSegment(rebase.entries.items[start..i], writer);
seg_id = entry.segment_id;
diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig
index b4e51478e0..98a693920a 100644
--- a/src/link/MachO/dyld_info/bind.zig
+++ b/src/link/MachO/dyld_info/bind.zig
@@ -51,7 +51,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
var start: usize = 0;
var seg_id: ?u8 = null;
- for (self.entries.items) |entry, i| {
+ for (self.entries.items, 0..) |entry, i| {
if (seg_id != null and seg_id.? == entry.segment_id) continue;
try finalizeSegment(self.entries.items[start..i], ctx, writer);
seg_id = entry.segment_id;
diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig
index 3867b15a96..5420bf6c29 100644
--- a/src/link/MachO/eh_frame.zig
+++ b/src/link/MachO/eh_frame.zig
@@ -16,7 +16,7 @@ const Zld = @import("zld.zig").Zld;
pub fn scanRelocs(zld: *Zld) !void {
const gpa = zld.gpa;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
var cies = std.AutoHashMap(u32, void).init(gpa);
defer cies.deinit();
@@ -108,7 +108,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var eh_frame_offset: u32 = 0;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));
var cies = std.AutoHashMap(u32, u32).init(gpa);
@@ -407,7 +407,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
- for (aug_str) |ch, i| switch (ch) {
+ for (aug_str, 0..) |ch, i| switch (ch) {
'z' => if (i > 0) {
return error.BadDwarfCfi;
} else {
@@ -467,7 +467,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
- for (aug_str) |ch, i| switch (ch) {
+ for (aug_str, 0..) |ch, i| switch (ch) {
'z' => if (i > 0) {
return error.BadDwarfCfi;
} else {
diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig
index f12fb80668..ce3fda0b1f 100644
--- a/src/link/MachO/thunks.zig
+++ b/src/link/MachO/thunks.zig
@@ -329,7 +329,7 @@ fn createThunkAtom(zld: *Zld) !AtomIndex {
fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
- for (zld.thunks.items) |thunk, i| {
+ for (zld.thunks.items, 0..) |thunk, i| {
if (thunk.len == 0) continue;
const thunk_atom_index = thunk.getStartAtomIndex();
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index a94a0828fc..a901e4fd4b 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -321,7 +321,7 @@ pub const Zld = struct {
syslibroot: ?[]const u8,
dependent_libs: anytype,
) !void {
- for (lib_names) |lib, i| {
+ for (lib_names, 0..) |lib, i| {
const lib_info = lib_infos[i];
log.debug("parsing lib path '{s}'", .{lib});
if (try self.parseDylib(lib, dependent_libs, .{
@@ -1092,7 +1092,7 @@ pub const Zld = struct {
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1223,7 +1223,7 @@ pub const Zld = struct {
const global = SymbolWithLoc{ .sym_index = sym_index };
try self.globals.append(gpa, global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1311,7 +1311,7 @@ pub const Zld = struct {
});
}
- for (self.sections.items(.header)) |header, sect_id| {
+ for (self.sections.items(.header), 0..) |header, sect_id| {
if (header.size == 0) continue; // empty section
const segname = header.segName();
@@ -1385,7 +1385,7 @@ pub const Zld = struct {
const gpa = self.gpa;
const slice = self.sections.slice();
- for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
+ for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
const header = slice.items(.header)[sect_id];
var atom_index = first_atom_index;
@@ -1525,7 +1525,7 @@ pub const Zld = struct {
fn calcSectionSizes(self: *Zld) !void {
const slice = self.sections.slice();
- for (slice.items(.header)) |*header, sect_id| {
+ for (slice.items(.header), 0..) |*header, sect_id| {
if (header.size == 0) continue;
if (self.requiresThunks()) {
if (header.isCode() and !(header.type() == macho.S_SYMBOL_STUBS) and !mem.eql(u8, header.sectName(), "__stub_helper")) continue;
@@ -1556,7 +1556,7 @@ pub const Zld = struct {
}
if (self.requiresThunks()) {
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
if (!header.isCode()) continue;
if (header.type() == macho.S_SYMBOL_STUBS) continue;
if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;
@@ -1568,7 +1568,7 @@ pub const Zld = struct {
}
fn allocateSegments(self: *Zld) !void {
- for (self.segments.items) |*segment, segment_index| {
+ for (self.segments.items, 0..) |*segment, segment_index| {
const is_text_segment = mem.eql(u8, segment.segName(), "__TEXT");
const base_size = if (is_text_segment) try load_commands.calcMinHeaderPad(self.gpa, self.options, .{
.segments = self.segments.items,
@@ -1606,7 +1606,7 @@ pub const Zld = struct {
var start = init_size;
const slice = self.sections.slice();
- for (slice.items(.header)[indexes.start..indexes.end]) |*header, sect_id| {
+ for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
const n_sect = @intCast(u8, indexes.start + sect_id + 1);
@@ -1750,7 +1750,7 @@ pub const Zld = struct {
}
fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
@@ -1852,7 +1852,7 @@ pub const Zld = struct {
}
// Finally, unpack the rest.
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
switch (header.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
@@ -1989,7 +1989,7 @@ pub const Zld = struct {
// Finally, unpack the rest.
const slice = self.sections.slice();
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
switch (header.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
@@ -2710,7 +2710,7 @@ pub const Zld = struct {
const amt = try self.file.preadAll(locals_buf, self.symtab_cmd.symoff);
if (amt != locals_buf.len) return error.InputOutput;
- const istab: usize = for (locals) |local, i| {
+ const istab: usize = for (locals, 0..) |local, i| {
if (local.stab()) break i;
} else locals.len;
const nstabs = locals.len - istab;
@@ -2897,7 +2897,7 @@ pub const Zld = struct {
}
fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
} else return null;
}
@@ -2921,7 +2921,7 @@ pub const Zld = struct {
pub fn getSectionByName(self: Zld, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @intCast(u8, i);
} else return null;
@@ -2929,7 +2929,7 @@ pub const Zld = struct {
pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
@@ -3220,7 +3220,7 @@ pub const Zld = struct {
fn logSegments(self: *Zld) void {
log.debug("segments:", .{});
- for (self.segments.items) |segment, i| {
+ for (self.segments.items, 0..) |segment, i| {
log.debug(" segment({d}): {s} @{x} ({x}), sizeof({x})", .{
i,
segment.segName(),
@@ -3233,7 +3233,7 @@ pub const Zld = struct {
fn logSections(self: *Zld) void {
log.debug("sections:", .{});
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x} ({x}), sizeof({x})", .{
i + 1,
header.segName(),
@@ -3271,10 +3271,10 @@ pub const Zld = struct {
const scoped_log = std.log.scoped(.symtab);
scoped_log.debug("locals:", .{});
- for (self.objects.items) |object, id| {
+ for (self.objects.items, 0..) |object, id| {
scoped_log.debug(" object({d}): {s}", .{ id, object.name });
if (object.in_symtab == null) continue;
- for (object.symtab) |sym, sym_id| {
+ for (object.symtab, 0..) |sym, sym_id| {
mem.set(u8, &buf, '_');
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
@@ -3286,7 +3286,7 @@ pub const Zld = struct {
}
}
scoped_log.debug(" object(-1)", .{});
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
if (sym.undf()) continue;
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
@@ -3298,7 +3298,7 @@ pub const Zld = struct {
}
scoped_log.debug("exports:", .{});
- for (self.globals.items) |global, i| {
+ for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
if (sym.n_desc == N_DEAD) continue;
@@ -3313,7 +3313,7 @@ pub const Zld = struct {
}
scoped_log.debug("imports:", .{});
- for (self.globals.items) |global, i| {
+ for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (!sym.undf()) continue;
if (sym.n_desc == N_DEAD) continue;
@@ -3328,7 +3328,7 @@ pub const Zld = struct {
}
scoped_log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3350,7 +3350,7 @@ pub const Zld = struct {
}
scoped_log.debug("__thread_ptrs entries:", .{});
- for (self.tlv_ptr_entries.items) |entry, i| {
+ for (self.tlv_ptr_entries.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3363,7 +3363,7 @@ pub const Zld = struct {
}
scoped_log.debug("stubs entries:", .{});
- for (self.stubs.items) |entry, i| {
+ for (self.stubs.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3376,9 +3376,9 @@ pub const Zld = struct {
}
scoped_log.debug("thunks:", .{});
- for (self.thunks.items) |thunk, i| {
+ for (self.thunks.items, 0..) |thunk, i| {
scoped_log.debug(" thunk({d})", .{i});
- for (thunk.lookup.keys()) |target, j| {
+ for (thunk.lookup.keys(), 0..) |target, j| {
const target_sym = self.getSymbol(target);
const atom = self.getAtom(thunk.lookup.get(target).?);
const atom_sym = self.getSymbol(atom.getSymbolWithLoc());
@@ -3395,7 +3395,7 @@ pub const Zld = struct {
fn logAtoms(self: *Zld) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
+ for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
var atom_index = first_atom_index;
if (atom_index == 0) continue;
@@ -3980,7 +3980,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
.unresolved = std.AutoArrayHashMap(u32, void).init(arena),
};
- for (zld.objects.items) |_, object_id| {
+ for (zld.objects.items, 0..) |_, object_id| {
try zld.resolveSymbolsInObject(@intCast(u32, object_id), &resolver);
}
@@ -4010,7 +4010,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
zld.entry_index = global_index;
}
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
}
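Note the `istab` hunk above: `for` is used as an expression, so `break i` yields the index of the first stab symbol and the `else` arm supplies `locals.len` when no break fires. The new counter syntax composes with that idiom; a small illustrative sketch:

    const std = @import("std");

    fn firstIndexOf(haystack: []const u8, needle: u8) usize {
        // The loop is an expression: its value is either the broken-out
        // index or, if the loop runs to completion, the else arm.
        return for (haystack, 0..) |byte, i| {
            if (byte == needle) break i;
        } else haystack.len;
    }

    test "for-else falls back when nothing matches" {
        try std.testing.expectEqual(@as(usize, 2), firstIndexOf("abc", 'c'));
        try std.testing.expectEqual(@as(usize, 3), firstIndexOf("abc", 'z'));
    }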
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 14a29e4498..2d74e404eb 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -298,7 +298,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
const values = try gpa.alloc(Value, air.values.len);
errdefer gpa.free(values);
- for (values) |*value, i| {
+ for (values, 0..) |*value, i| {
value.* = try air.values[i].copy(air_arena);
}
@@ -308,7 +308,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
const air_tags = instructions.items(.tag);
const air_datas = instructions.items(.data);
- for (air_tags) |tag, i| {
+ for (air_tags, 0..) |tag, i| {
switch (tag) {
.alloc, .ret_ptr, .const_ty => air_datas[i].ty = try air_datas[i].ty.copy(air_arena),
else => {},
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index e62a2050d7..00a52177f7 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -590,7 +590,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
const object: Object = wasm.objects.items[object_index];
log.debug("Resolving symbols in object: '{s}'", .{object.name});
- for (object.symtable) |symbol, i| {
+ for (object.symtable, 0..) |symbol, i| {
const sym_index = @intCast(u32, i);
const location: SymbolLoc = .{
.file = object_index,
@@ -794,7 +794,7 @@ fn validateFeatures(
// extract all the used, disallowed and required features from each
// linked object file so we can test them.
- for (wasm.objects.items) |object, object_index| {
+ for (wasm.objects.items, 0..) |object, object_index| {
for (object.features) |feature| {
const value = @intCast(u16, object_index) << 1 | @as(u1, 1);
switch (feature.prefix) {
@@ -815,7 +815,7 @@ fn validateFeatures(
// when we infer the features, we allow each feature found in the 'used' set
// and insert it into the 'allowed' set. When features are not inferred,
// we validate that a used feature is allowed.
- for (used) |used_set, used_index| {
+ for (used, 0..) |used_set, used_index| {
const is_enabled = @truncate(u1, used_set) != 0;
if (infer) {
allowed[used_index] = is_enabled;
@@ -849,7 +849,7 @@ fn validateFeatures(
}
// validate the linked object file has each required feature
- for (required) |required_feature, feature_index| {
+ for (required, 0..) |required_feature, feature_index| {
const is_required = @truncate(u1, required_feature) != 0;
if (is_required and !object_used_features[feature_index]) {
log.err("feature '{s}' is required but not used in linked object", .{(@intToEnum(types.Feature.Tag, feature_index)).toString()});
@@ -1818,7 +1818,7 @@ fn sortDataSegments(wasm: *Wasm) !void {
/// original functions and their types. We need to know the type to verify it doesn't
/// contain any parameters.
fn setupInitFunctions(wasm: *Wasm) !void {
- for (wasm.objects.items) |object, file_index| {
+ for (wasm.objects.items, 0..) |object, file_index| {
try wasm.init_funcs.ensureUnusedCapacity(wasm.base.allocator, object.init_funcs.len);
for (object.init_funcs) |init_func| {
const symbol = object.symtable[init_func.symbol_index];
@@ -2717,7 +2717,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.parseInputFiles(positionals.items);
- for (wasm.objects.items) |_, object_index| {
+ for (wasm.objects.items, 0..) |_, object_index| {
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
@@ -2732,7 +2732,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.setupStart();
try wasm.setupImports();
- for (wasm.objects.items) |*object, object_index| {
+ for (wasm.objects.items, 0..) |*object, object_index| {
try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm);
}
@@ -2801,7 +2801,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.parseInputFiles(positionals.items);
- for (wasm.objects.items) |_, object_index| {
+ for (wasm.objects.items, 0..) |_, object_index| {
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
@@ -2850,7 +2850,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
- for (wasm.objects.items) |*object, object_index| {
+ for (wasm.objects.items, 0..) |*object, object_index| {
try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm);
}
@@ -3362,7 +3362,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
try writer.writeAll(target_features);
try leb.writeULEB128(writer, features_count);
- for (enabled_features) |enabled, feature_index| {
+ for (enabled_features, 0..) |enabled, feature_index| {
if (enabled) {
const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) };
try leb.writeULEB128(writer, @enumToInt(feature.prefix));
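The `validateFeatures` hunks manipulate u16 entries that pack an enabled flag into bit 0 (`| @as(u1, 1)`, read back with `@truncate(u1, ...)`) and the providing object's index into the remaining bits. A hedged sketch of that encoding, with hypothetical helper names not present in the source:

    const std = @import("std");

    fn pack(object_index: u15, enabled: bool) u16 {
        // bit 0: enabled flag; bits 1..15: index of the object using it.
        return (@as(u16, object_index) << 1) | @boolToInt(enabled);
    }

    fn isEnabled(entry: u16) bool {
        return @truncate(u1, entry) != 0;
    }

    fn objectIndex(entry: u16) u15 {
        return @intCast(u15, entry >> 1);
    }

    test "feature entries round-trip" {
        const entry = pack(42, true);
        try std.testing.expect(isEnabled(entry));
        try std.testing.expectEqual(@as(u15, 42), objectIndex(entry));
    }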
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 7d4f6a4e36..82cab2528a 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -882,7 +882,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
list.deinit();
} else symbol_for_segment.deinit();
- for (object.symtable) |symbol, symbol_index| {
+ for (object.symtable, 0..) |symbol, symbol_index| {
switch (symbol.tag) {
.function, .data, .section => if (!symbol.isUndefined()) {
const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index });
@@ -896,7 +896,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
}
}
- for (object.relocatable_data) |relocatable_data, index| {
+ for (object.relocatable_data, 0..) |relocatable_data, index| {
const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse {
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 20a3a2493e..c97332984f 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -124,7 +124,7 @@ pub const LibStub = struct {
log.debug("trying to parse as []TbdV4", .{});
const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
- for (inner) |doc, i| {
+ for (inner, 0..) |doc, i| {
out[i] = .{ .v4 = doc };
}
break :blk out;
@@ -142,7 +142,7 @@ pub const LibStub = struct {
log.debug("trying to parse as []TbdV3", .{});
const inner = lib_stub.yaml.parse([]TbdV3) catch break :err;
var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
- for (inner) |doc, i| {
+ for (inner, 0..) |doc, i| {
out[i] = .{ .v3 = doc };
}
break :blk out;
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 748f1c138f..d4136b35d3 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -84,7 +84,7 @@ pub const Value = union(ValueType) {
const first = list[0];
if (first.is_compound()) {
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
try writer.writeByteNTimes(' ', args.indentation);
try writer.writeAll("- ");
try elem.stringify(writer, .{
@@ -99,7 +99,7 @@ pub const Value = union(ValueType) {
}
try writer.writeAll("[ ");
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
try elem.stringify(writer, args);
if (i < len - 1) {
try writer.writeAll(", ");
@@ -112,7 +112,7 @@ pub const Value = union(ValueType) {
const len = keys.len;
if (len == 0) return;
- for (keys) |key, i| {
+ for (keys, 0..) |key, i| {
if (!args.should_inline_first_key or i != 0) {
try writer.writeByteNTimes(' ', args.indentation);
}
@@ -292,7 +292,7 @@ pub const Yaml = struct {
switch (@typeInfo(T)) {
.Array => |info| {
var parsed: T = undefined;
- for (self.docs.items) |doc, i| {
+ for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
@@ -301,7 +301,7 @@ pub const Yaml = struct {
switch (info.size) {
.Slice => {
var parsed = try self.arena.allocator().alloc(info.child, self.docs.items.len);
- for (self.docs.items) |doc, i| {
+ for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
@@ -393,7 +393,7 @@ pub const Yaml = struct {
}
var parsed = try arena.alloc(ptr_info.child, value.list.len);
- for (value.list) |elem, i| {
+ for (value.list, 0..) |elem, i| {
parsed[i] = try self.parseValue(ptr_info.child, elem);
}
return parsed;
@@ -407,7 +407,7 @@ pub const Yaml = struct {
if (array_info.len != list.len) return error.ArraySizeMismatch;
var parsed: T = undefined;
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
parsed[i] = try self.parseValue(array_info.child, elem);
}
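`Yaml.parseValue` dispatches on `@typeInfo` at compile time: fixed-size arrays are filled in place while slices get arena-allocated storage per element. A toy sketch of the same comptime dispatch (the function and names are illustrative, not part of the tapi API):

    const std = @import("std");

    fn fillSquares(comptime T: type) T {
        var out: T = undefined;
        switch (@typeInfo(T)) {
            // Like parseValue, pick the decoding strategy from the type.
            .Array => |info| for (&out, 0..) |*slot, i| {
                slot.* = @intCast(info.child, i * i);
            },
            else => @compileError("expected an array type"),
        }
        return out;
    }

    test "comptime type-directed fill" {
        const squares = fillSquares([4]u8);
        try std.testing.expectEqual(@as(u8, 9), squares[3]);
    }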
diff --git a/src/main.zig b/src/main.zig
index a0cdfb36b6..e80be06a36 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3684,10 +3684,10 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
new_argv_with_sentinel[clang_args_len] = null;
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
- for (argv.items) |arg, i| {
+ for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
- for (c_source_file.extra_flags) |arg, i| {
+ for (c_source_file.extra_flags, 0..) |arg, i| {
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
@@ -4816,7 +4816,7 @@ extern "c" fn ZigLlvmAr_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
fn argsCopyZ(alloc: Allocator, args: []const []const u8) ![:null]?[*:0]u8 {
var argv = try alloc.allocSentinel(?[*:0]u8, args.len, null);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
argv[i] = try alloc.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation.
}
return argv;
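`argsCopyZ` builds a C-style argv: `allocSentinel` reserves one extra slot so the `[:null]` slice type guarantees `argv[len] == null`, and each `dupeZ` result coerces from `[:0]u8` to `[*:0]u8`. A minimal sketch of the same pattern:

    const std = @import("std");

    fn makeArgv(alloc: std.mem.Allocator, args: []const []const u8) ![:null]?[*:0]u8 {
        var argv = try alloc.allocSentinel(?[*:0]u8, args.len, null);
        for (args, 0..) |arg, i| {
            argv[i] = try alloc.dupeZ(u8, arg);
        }
        return argv;
    }

    test "argv carries a null sentinel" {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();
        const argv = try makeArgv(arena.allocator(), &.{ "zig", "version" });
        try std.testing.expectEqual(@as(usize, 2), argv.len);
        // Indexing a sentinel-terminated slice at len yields the sentinel.
        try std.testing.expect(argv[argv.len] == null);
    }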
diff --git a/src/mingw.zig b/src/mingw.zig
index 4f94e26a98..9e9e180945 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -72,7 +72,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.mingw32_lib => {
var c_source_files: [mingw32_lib_deps.len]Compilation.CSourceFile = undefined;
- for (mingw32_lib_deps) |dep, i| {
+ for (mingw32_lib_deps, 0..) |dep, i| {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-DHAVE_CONFIG_H",
@@ -236,7 +236,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}),
});
var c_source_files: [uuid_src.len]Compilation.CSourceFile = undefined;
- for (uuid_src) |dep, i| {
+ for (uuid_src, 0..) |dep, i| {
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "libsrc", dep,
diff --git a/src/objcopy.zig b/src/objcopy.zig
index 72ff8deafd..31e3d60d0d 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -312,7 +312,7 @@ const BinaryElfOutput = struct {
std.sort.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare);
- for (self.segments.items) |firstSegment, i| {
+ for (self.segments.items, 0..) |firstSegment, i| {
if (firstSegment.firstSection) |firstSection| {
const diff = firstSection.elfOffset - firstSegment.elfOffset;
diff --git a/src/print_air.zig b/src/print_air.zig
index 133e987285..447af5a9c7 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -68,7 +68,7 @@ const Writer = struct {
indent: usize,
fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
- for (w.air.instructions.items(.tag)) |tag, i| {
+ for (w.air.instructions.items(.tag), 0..) |tag, i| {
const inst = @intCast(u32, i);
switch (tag) {
.constant, .const_ty => {
@@ -388,7 +388,7 @@ const Writer = struct {
try w.writeType(s, vector_ty);
try s.writeAll(", [");
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, i, elem);
}
@@ -682,7 +682,7 @@ const Writer = struct {
const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]);
try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", [");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, 1 + i, arg);
}
@@ -743,7 +743,7 @@ const Writer = struct {
if (liveness_condbr.then_deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (liveness_condbr.then_deaths) |operand, i| {
+ for (liveness_condbr.then_deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -756,7 +756,7 @@ const Writer = struct {
if (liveness_condbr.else_deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (liveness_condbr.else_deaths) |operand, i| {
+ for (liveness_condbr.else_deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -790,7 +790,7 @@ const Writer = struct {
extra_index = case.end + case.data.items_len + case_body.len;
try s.writeAll(", [");
- for (items) |item, item_i| {
+ for (items, 0..) |item, item_i| {
if (item_i != 0) try s.writeAll(", ");
try w.writeInstRef(s, item, false);
}
@@ -800,7 +800,7 @@ const Writer = struct {
const deaths = liveness.deaths[case_i];
if (deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (deaths) |operand, i| {
+ for (deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -821,7 +821,7 @@ const Writer = struct {
const deaths = liveness.deaths[liveness.deaths.len - 1];
if (deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (deaths) |operand, i| {
+ for (deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
diff --git a/src/print_targets.zig b/src/print_targets.zig
index 64149d6fba..19518a3368 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -99,7 +99,7 @@ pub fn cmdTargets(
for (arch.allCpuModels()) |model| {
try jws.objectField(model.name);
try jws.beginArray();
- for (arch.allFeaturesList()) |feature, i| {
+ for (arch.allFeaturesList(), 0..) |feature, i| {
if (model.features.isEnabled(@intCast(u8, i))) {
try jws.arrayElem();
try jws.emitString(feature.name);
@@ -145,7 +145,7 @@ pub fn cmdTargets(
{
try jws.objectField("features");
try jws.beginArray();
- for (native_target.cpu.arch.allFeaturesList()) |feature, i_usize| {
+ for (native_target.cpu.arch.allFeaturesList(), 0..) |feature, i_usize| {
const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize);
if (cpu.features.isEnabled(index)) {
try jws.arrayElem();
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 8d97000582..fb9031296d 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -296,6 +296,7 @@ const Writer = struct {
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.array_cat,
.array_mul,
.mul,
@@ -355,6 +356,8 @@ const Writer = struct {
.array_type,
=> try self.writePlNodeBin(stream, inst),
+ .for_len => try self.writePlNodeMultiOp(stream, inst),
+
.elem_ptr_imm => try self.writeElemPtrImm(stream, inst),
.@"export" => try self.writePlNodeExport(stream, inst),
@@ -868,6 +871,19 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
+ fn writePlNodeMultiOp(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = self.code.refSlice(extra.end, extra.data.operands_len);
+ try stream.writeAll("{");
+ for (args, 0..) |arg, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, arg);
+ }
+ try stream.writeAll("}) ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
fn writeElemPtrImm(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
@@ -1053,7 +1069,7 @@ const Writer = struct {
const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const operands = self.code.refSlice(extra.end, extended.small);
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, operand);
}
@@ -1377,7 +1393,7 @@ const Writer = struct {
try stream.writeAll("{\n");
self.indent += 2;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
try self.writeDocComment(stream, field.doc_comment_index);
try stream.writeByteNTimes(' ', self.indent);
try self.writeFlag(stream, "comptime ", field.is_comptime);
@@ -1944,7 +1960,7 @@ const Writer = struct {
try stream.writeByteNTimes(' ', self.indent);
if (is_inline) try stream.writeAll("inline ");
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
if (item_i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, item_ref);
}
@@ -2260,7 +2276,7 @@ const Writer = struct {
try self.writeBracedBody(stream, body);
try stream.writeAll(",[");
const args = self.code.refSlice(extra.end, extended.small);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2319,7 +2335,7 @@ const Writer = struct {
try self.writeInstRef(stream, args[0]);
try stream.writeAll("{");
- for (args[1..]) |arg, i| {
+ for (args[1..], 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2334,7 +2350,7 @@ const Writer = struct {
const args = self.code.refSlice(extra.end, extra.data.operands_len);
try stream.writeAll("{");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2354,7 +2370,7 @@ const Writer = struct {
try stream.writeAll(", ");
try stream.writeAll(".{");
- for (elems) |elem, i| {
+ for (elems, 0..) |elem, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, elem);
}
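The new `for_len` instruction printed via `writePlNodeMultiOp` backs the reworked for loops: Sema uses it to compute the shared loop length and reject mismatched operands. A short sketch of the source-level behavior it supports:

    const std = @import("std");

    test "ranged and multi-object for loops" {
        // A bounded range iterates on its own...
        var sum: usize = 0;
        for (0..4) |i| sum += i;
        try std.testing.expectEqual(@as(usize, 6), sum);

        // ...while paired operands must agree on length, which is what
        // for_len verifies before the loop body is analyzed.
        const letters = "abc";
        var buf: [3]u8 = undefined;
        for (letters, &buf) |c, *slot| slot.* = c;
        try std.testing.expectEqualStrings("abc", &buf);
    }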
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 0ab9d103b8..2fe0cd2b6a 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -82,7 +82,7 @@ pub fn RegisterManager(
comptime registers: []const Register,
reg: Register,
) ?std.math.IntFittingRange(0, registers.len - 1) {
- inline for (tracked_registers) |cpreg, i| {
+ inline for (tracked_registers, 0..) |cpreg, i| {
if (reg.id() == cpreg.id()) return i;
}
return null;
@@ -153,7 +153,7 @@ pub fn RegisterManager(
regs: [count]Register,
) [count]RegisterLock {
var buf: [count]RegisterLock = undefined;
- for (regs) |reg, i| {
+ for (regs, 0..) |reg, i| {
buf[i] = self.lockRegAssumeUnused(reg);
}
return buf;
@@ -207,7 +207,7 @@ pub fn RegisterManager(
}
assert(i == count);
- for (regs) |reg, j| {
+ for (regs, 0..) |reg, j| {
self.markRegAllocated(reg);
if (insts[j]) |inst| {
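The `indexOfReg` hunk shows the counter form also works with `inline for`, which unrolls over a comptime-known register list. A simplified sketch (returning `?usize` instead of the width-minimized integer type used above):

    const std = @import("std");

    fn indexOf(comptime names: []const []const u8, name: []const u8) ?usize {
        // Unrolled at compile time: one comparison per candidate.
        inline for (names, 0..) |candidate, i| {
            if (std.mem.eql(u8, candidate, name)) return i;
        }
        return null;
    }

    test "inline for with a counter" {
        const regs = &[_][]const u8{ "rax", "rbx", "rcx" };
        try std.testing.expectEqual(@as(?usize, 1), indexOf(regs, "rbx"));
    }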
diff --git a/src/test.zig b/src/test.zig
index b25a6c1e78..acc1bcdc1f 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -664,7 +664,7 @@ pub const TestContext = struct {
errors: []const []const u8,
) void {
var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch @panic("out of memory");
- for (errors) |err_msg_line, i| {
+ for (errors, 0..) |err_msg_line, i| {
if (std.mem.startsWith(u8, err_msg_line, "error: ")) {
array[i] = .{
.plain = .{
@@ -1558,7 +1558,7 @@ pub const TestContext = struct {
});
defer comp.destroy();
- update: for (case.updates.items) |update, update_index| {
+ update: for (case.updates.items, 0..) |update, update_index| {
var update_node = root_node.start(update.name, 3);
update_node.activate();
defer update_node.end();
@@ -1631,7 +1631,7 @@ pub const TestContext = struct {
defer notes_to_check.deinit();
for (actual_errors.list) |actual_error| {
- for (case_error_list) |case_msg, i| {
+ for (case_error_list, 0..) |case_msg, i| {
if (handled_errors[i]) continue;
const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
@@ -1702,7 +1702,7 @@ pub const TestContext = struct {
}
}
while (notes_to_check.popOrNull()) |note| {
- for (case_error_list) |case_msg, i| {
+ for (case_error_list, 0..) |case_msg, i| {
const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
switch (note.*) {
.src => |actual_msg| {
@@ -1752,7 +1752,7 @@ pub const TestContext = struct {
}
}
- for (handled_errors) |handled, i| {
+ for (handled_errors, 0..) |handled, i| {
if (!handled) {
print(
"\nExpected error not found:\n{s}\n{}\n{s}",
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 16a3c86757..9057873433 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -1423,7 +1423,7 @@ fn transConvertVectorExpr(
}
const init_list = try c.arena.alloc(Node, num_elements);
- for (init_list) |*init, init_index| {
+ for (init_list, 0..) |*init, init_index| {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
init.* = try Tag.identifier.create(c.arena, name);
@@ -1454,7 +1454,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
const init_list = try c.arena.alloc(Node, mask_len);
- for (init_list) |*init, i| {
+ for (init_list, 0..) |*init, i| {
const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used);
const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len });
init.* = converted_index;
@@ -2686,7 +2686,7 @@ fn transInitListExprArray(
const init_node = if (init_count != 0) blk: {
const init_list = try c.arena.alloc(Node, init_count);
- for (init_list) |*init, i| {
+ for (init_list, 0..) |*init, i| {
const elem_expr = expr.getInit(@intCast(c_uint, i));
init.* = try transExprCoercing(c, scope, elem_expr, .used);
}
@@ -2760,7 +2760,7 @@ fn transInitListExprVector(
}
const init_list = try c.arena.alloc(Node, num_elements);
- for (init_list) |*init, init_index| {
+ for (init_list, 0..) |*init, init_index| {
if (init_index < init_count) {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
@@ -4649,7 +4649,7 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias:
const unwrap_expr = try Tag.unwrap.create(c.arena, init);
const args = try c.arena.alloc(Node, fn_params.items.len);
- for (fn_params.items) |param, i| {
+ for (fn_params.items, 0..) |param, i| {
args[i] = try Tag.identifier.create(c.arena, param.name.?);
}
const call_expr = try Tag.call.create(c.arena, .{
@@ -5293,7 +5293,7 @@ const PatternList = struct {
fn init(allocator: mem.Allocator) Error!PatternList {
const patterns = try allocator.alloc(Pattern, templates.len);
- for (templates) |template, i| {
+ for (templates, 0..) |template, i| {
try patterns[i].init(allocator, template);
}
return PatternList{ .patterns = patterns };
@@ -5778,7 +5778,7 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!Node {
fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
var source = m.slice();
- for (source) |c, i| {
+ for (source, 0..) |c, i| {
if (c == '\"' or c == '\'') {
source = source[i..];
break;
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 78175a611b..81a19eb39d 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -1765,7 +1765,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
_ = try c.addToken(.l_brace, "{");
var cases = try c.gpa.alloc(NodeIndex, payload.cases.len);
defer c.gpa.free(cases);
- for (payload.cases) |case, i| {
+ for (payload.cases, 0..) |case, i| {
cases[i] = try renderNode(c, case);
_ = try c.addToken(.comma, ",");
}
@@ -1800,7 +1800,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1));
defer c.gpa.free(items);
items[0] = 0;
- for (payload.cases) |item, i| {
+ for (payload.cases, 0..) |item, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
items[i] = try renderNode(c, item);
}
@@ -1950,7 +1950,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
- for (payload) |init, i| {
+ for (payload, 0..) |init, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
inits[i] = try renderNode(c, init);
}
@@ -1984,7 +1984,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
- for (payload) |init, i| {
+ for (payload, 0..) |init, i| {
_ = try c.addToken(.period, ".");
_ = try c.addIdentifier(init.name);
_ = try c.addToken(.equal, "=");
@@ -2022,7 +2022,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1));
defer c.gpa.free(inits);
inits[0] = 0;
- for (payload.inits) |init, i| {
+ for (payload.inits, 0..) |init, i| {
_ = try c.addToken(.period, ".");
_ = try c.addIdentifier(init.name);
_ = try c.addToken(.equal, "=");
@@ -2080,7 +2080,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
members[0] = 0;
members[1] = 0;
- for (payload.fields) |field, i| {
+ for (payload.fields, 0..) |field, i| {
const name_tok = try c.addTokenFmt(.identifier, "{s}", .{std.zig.fmtId(field.name)});
_ = try c.addToken(.colon, ":");
const type_expr = try renderNode(c, field.type);
@@ -2116,10 +2116,10 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
});
_ = try c.addToken(.comma, ",");
}
- for (payload.variables) |variable, i| {
+ for (payload.variables, 0..) |variable, i| {
members[payload.fields.len + i] = try renderNode(c, variable);
}
- for (payload.functions) |function, i| {
+ for (payload.functions, 0..) |function, i| {
members[payload.fields.len + num_vars + i] = try renderNode(c, function);
}
_ = try c.addToken(.r_brace, "}");
@@ -2171,7 +2171,7 @@ fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex
var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1));
defer c.gpa.free(rendered);
rendered[0] = 0;
- for (inits) |init, i| {
+ for (inits, 0..) |init, i| {
rendered[i] = try renderNode(c, init);
_ = try c.addToken(.comma, ",");
}
@@ -2539,7 +2539,7 @@ fn renderCall(c: *Context, lhs: NodeIndex, args: []const Node) !NodeIndex {
var rendered = try c.gpa.alloc(NodeIndex, args.len);
defer c.gpa.free(rendered);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
rendered[i] = try renderNode(c, arg);
}
@@ -2879,7 +2879,7 @@ fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.Ar
var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
errdefer rendered.deinit();
- for (params) |param, i| {
+ for (params, 0..) |param, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
if (param.is_noalias) _ = try c.addToken(.keyword_noalias, "noalias");
if (param.name) |some| {
diff --git a/src/type.zig b/src/type.zig
index a13e30cb4c..ec4db8689f 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -628,7 +628,7 @@ pub const Type = extern union {
const a_set = a.errorSetNames();
const b_set = b.errorSetNames();
if (a_set.len != b_set.len) return false;
- for (a_set) |a_item, i| {
+ for (a_set, 0..) |a_item, i| {
const b_item = b_set[i];
if (!std.mem.eql(u8, a_item, b_item)) return false;
}
@@ -675,7 +675,7 @@ pub const Type = extern union {
if (a_info.param_types.len != b_info.param_types.len)
return false;
- for (a_info.param_types) |a_param_ty, i| {
+ for (a_info.param_types, 0..) |a_param_ty, i| {
const b_param_ty = b_info.param_types[i];
if (a_info.comptime_params[i] != b_info.comptime_params[i])
return false;
@@ -824,12 +824,12 @@ pub const Type = extern union {
if (a_tuple.types.len != b_tuple.types.len) return false;
- for (a_tuple.types) |a_ty, i| {
+ for (a_tuple.types, 0..) |a_ty, i| {
const b_ty = b_tuple.types[i];
if (!eql(a_ty, b_ty, mod)) return false;
}
- for (a_tuple.values) |a_val, i| {
+ for (a_tuple.values, 0..) |a_val, i| {
const ty = a_tuple.types[i];
const b_val = b_tuple.values[i];
if (a_val.tag() == .unreachable_value) {
@@ -855,17 +855,17 @@ pub const Type = extern union {
if (a_struct_obj.types.len != b_struct_obj.types.len) return false;
- for (a_struct_obj.names) |a_name, i| {
+ for (a_struct_obj.names, 0..) |a_name, i| {
const b_name = b_struct_obj.names[i];
if (!std.mem.eql(u8, a_name, b_name)) return false;
}
- for (a_struct_obj.types) |a_ty, i| {
+ for (a_struct_obj.types, 0..) |a_ty, i| {
const b_ty = b_struct_obj.types[i];
if (!eql(a_ty, b_ty, mod)) return false;
}
- for (a_struct_obj.values) |a_val, i| {
+ for (a_struct_obj.values, 0..) |a_val, i| {
const ty = a_struct_obj.types[i];
const b_val = b_struct_obj.values[i];
if (a_val.tag() == .unreachable_value) {
@@ -1073,7 +1073,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, fn_info.noalias_bits);
std.hash.autoHash(hasher, fn_info.param_types.len);
- for (fn_info.param_types) |param_ty, i| {
+ for (fn_info.param_types, 0..) |param_ty, i| {
std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
if (param_ty.tag() == .generic_poison) continue;
hashWithHasher(param_ty, hasher, mod);
@@ -1175,7 +1175,7 @@ pub const Type = extern union {
const tuple = ty.tupleFields();
std.hash.autoHash(hasher, tuple.types.len);
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
hashWithHasher(field_ty, hasher, mod);
const field_val = tuple.values[i];
if (field_val.tag() == .unreachable_value) continue;
@@ -1187,7 +1187,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
std.hash.autoHash(hasher, struct_obj.types.len);
- for (struct_obj.types) |field_ty, i| {
+ for (struct_obj.types, 0..) |field_ty, i| {
const field_name = struct_obj.names[i];
const field_val = struct_obj.values[i];
hasher.update(field_name);
@@ -1403,10 +1403,10 @@ pub const Type = extern union {
const payload = self.castTag(.tuple).?.data;
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
- for (payload.types) |ty, i| {
+ for (payload.types, 0..) |ty, i| {
types[i] = try ty.copy(allocator);
}
- for (payload.values) |val, i| {
+ for (payload.values, 0..) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.tuple.create(allocator, .{
@@ -1419,13 +1419,13 @@ pub const Type = extern union {
const names = try allocator.alloc([]const u8, payload.names.len);
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
- for (payload.names) |name, i| {
+ for (payload.names, 0..) |name, i| {
names[i] = try allocator.dupe(u8, name);
}
- for (payload.types) |ty, i| {
+ for (payload.types, 0..) |ty, i| {
types[i] = try ty.copy(allocator);
}
- for (payload.values) |val, i| {
+ for (payload.values, 0..) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.anon_struct.create(allocator, .{
@@ -1437,7 +1437,7 @@ pub const Type = extern union {
.function => {
const payload = self.castTag(.function).?.data;
const param_types = try allocator.alloc(Type, payload.param_types.len);
- for (payload.param_types) |param_ty, i| {
+ for (payload.param_types, 0..) |param_ty, i| {
param_types[i] = try param_ty.copy(allocator);
}
const other_comptime_params = payload.comptime_params[0..payload.param_types.len];
@@ -1678,7 +1678,7 @@ pub const Type = extern union {
.function => {
const payload = ty.castTag(.function).?.data;
try writer.writeAll("fn(");
- for (payload.param_types) |param_type, i| {
+ for (payload.param_types, 0..) |param_type, i| {
if (i != 0) try writer.writeAll(", ");
try param_type.dump("", .{}, writer);
}
@@ -1739,7 +1739,7 @@ pub const Type = extern union {
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
@@ -1756,7 +1756,7 @@ pub const Type = extern union {
.anon_struct => {
const anon_struct = ty.castTag(.anon_struct).?.data;
try writer.writeAll("struct{");
- for (anon_struct.types) |field_ty, i| {
+ for (anon_struct.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = anon_struct.values[i];
if (val.tag() != .unreachable_value) {
@@ -1892,7 +1892,7 @@ pub const Type = extern union {
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -1908,7 +1908,7 @@ pub const Type = extern union {
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2063,7 +2063,7 @@ pub const Type = extern union {
.function => {
const fn_info = ty.fnInfo();
try writer.writeAll("fn(");
- for (fn_info.param_types) |param_ty, i| {
+ for (fn_info.param_types, 0..) |param_ty, i| {
if (i != 0) try writer.writeAll(", ");
if (fn_info.paramIsComptime(i)) {
try writer.writeAll("comptime ");
@@ -2137,7 +2137,7 @@ pub const Type = extern union {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
@@ -2154,7 +2154,7 @@ pub const Type = extern union {
const anon_struct = ty.castTag(.anon_struct).?.data;
try writer.writeAll("struct{");
- for (anon_struct.types) |field_ty, i| {
+ for (anon_struct.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = anon_struct.values[i];
if (val.tag() != .unreachable_value) {
@@ -2253,7 +2253,7 @@ pub const Type = extern union {
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2266,7 +2266,7 @@ pub const Type = extern union {
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2568,7 +2568,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true;
@@ -3125,7 +3125,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (!(field_ty.hasRuntimeBits())) continue;
@@ -5044,7 +5044,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val, i| {
+ for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
if (tuple.types[i].onePossibleValue() != null) continue;
@@ -5256,7 +5256,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and field_ty.comptimeOnly()) return true;
}
@@ -5326,6 +5326,19 @@ pub const Type = extern union {
};
}
+ pub fn indexableHasLen(ty: Type) bool {
+ return switch (ty.zigTypeTag()) {
+ .Array, .Vector => true,
+ .Pointer => switch (ty.ptrSize()) {
+ .Many, .C => false,
+ .Slice => true,
+ .One => ty.elemType().zigTypeTag() == .Array,
+ },
+ .Struct => ty.isTuple(),
+ else => false,
+ };
+ }
+
/// Returns null if the type has no namespace.
pub fn getNamespace(self: Type) ?*Module.Namespace {
return switch (self.tag()) {
@@ -5740,7 +5753,7 @@ pub const Type = extern union {
var bit_offset: u16 = undefined;
var elem_size_bits: u16 = undefined;
var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |f, i| {
+ for (struct_obj.fields.values(), 0..) |f, i| {
if (!f.ty.hasRuntimeBits()) continue;
const field_bits = @intCast(u16, f.ty.bitSize(target));
@@ -5821,7 +5834,7 @@ pub const Type = extern union {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
// comptime field
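The new `Type.indexableHasLen` helper classifies for-loop operands: arrays, vectors, slices, pointers to arrays, and tuples carry a length, while many-item and C pointers do not and therefore need a companion operand that does. An illustrative sketch:

    const std = @import("std");

    test "length-carrying operands drive the loop" {
        const array = [_]u32{ 1, 2, 3 };
        const slice: []const u32 = &array;
        const many: [*]const u32 = &array;

        // Both operands here carry a length, so this compiles and the
        // lengths must agree:
        var sum: u32 = 0;
        for (slice, array) |a, b| sum += a + b;
        try std.testing.expectEqual(@as(u32, 12), sum);

        // `for (many) |x| ...` alone would be rejected; a many-item pointer
        // only participates when another operand supplies the length.
        _ = many;
    }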
diff --git a/src/value.zig b/src/value.zig
index 306e31c0a7..0d80bf7927 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -614,7 +614,7 @@ pub const Value = extern union {
.base = payload.base,
.data = try arena.alloc(Value, payload.data.len),
};
- for (new_payload.data) |*elem, i| {
+ for (new_payload.data, 0..) |*elem, i| {
elem.* = try payload.data[i].copy(arena);
}
return Value{ .ptr_otherwise = &new_payload.base };
@@ -891,7 +891,7 @@ pub const Value = extern union {
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
const result = try allocator.alloc(u8, @intCast(usize, len));
var elem_value_buf: ElemValueBuffer = undefined;
- for (result) |*elem, i| {
+ for (result, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
}
@@ -1282,7 +1282,7 @@ pub const Value = extern union {
.int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
else => unreachable,
};
- for (buffer[0..byte_count]) |_, i| switch (endian) {
+ for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
.Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
.Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
};
@@ -1324,7 +1324,7 @@ pub const Value = extern union {
.Extern => {
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
}
@@ -1340,6 +1340,14 @@ pub const Value = extern union {
const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
},
+ .Union => switch (ty.containerLayout()) {
+ .Auto => unreachable,
+ .Extern => @panic("TODO implement writeToMemory for extern unions"),
+ .Packed => {
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ },
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1423,13 +1431,24 @@ pub const Value = extern union {
var bits: u16 = 0;
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
},
+ .Union => switch (ty.containerLayout()) {
+ .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Extern => unreachable, // Handled in non-packed writeToMemory
+ .Packed => {
+ const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
+ const field_type = ty.unionFields().values()[field_index.?].ty;
+ const field_val = val.fieldValue(field_type, field_index.?);
+
+ field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
+ },
+ },
else => @panic("TODO implement writeToPackedMemory for more types"),
}
}
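The new `.Union` case writes a packed union by locating the active field through the tag and writing that field's bits at the same offset, since every field of a packed union aliases the same backing bits. A minimal sketch of that layout (types are illustrative):

    const std = @import("std");

    const Raw = packed union {
        word: u16,
        pair: packed struct { lo: u8, hi: u8 },
    };

    test "packed union fields alias one backing integer" {
        const v = Raw{ .word = 0xBEEF };
        // Packed layout puts the first struct field in the least
        // significant bits, independent of host endianness.
        try std.testing.expectEqual(@as(u8, 0xEF), v.pair.lo);
        try std.testing.expectEqual(@as(u8, 0xBE), v.pair.hi);
    }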
@@ -1510,7 +1529,7 @@ pub const Value = extern union {
.Extern => {
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
@@ -1598,7 +1617,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
- for (elems) |_, i| {
+ for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena);
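
The comment above records an LLVM quirk: on big-endian targets the in-memory element order of vectors is reversed, so deserialization writes element i of the buffer into slot len - i - 1. The index flip in isolation, with a hypothetical helper name:

const std = @import("std");

fn targetElemIndex(endian: std.builtin.Endian, len: usize, i: usize) usize {
    return switch (endian) {
        // Big-endian vectors are stored element-reversed by LLVM.
        .Big => len - i - 1,
        .Little => i,
    };
}

test "element index flips only on big-endian" {
    try std.testing.expectEqual(@as(usize, 3), targetElemIndex(.Big, 4, 0));
    try std.testing.expectEqual(@as(usize, 0), targetElemIndex(.Little, 4, 0));
}
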
@@ -1613,7 +1632,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
bits += field_bits;
@@ -2240,7 +2259,7 @@ pub const Value = extern union {
if (ty.isSimpleTupleOrAnonStruct()) {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
- for (types) |field_ty, i| {
+ for (types, 0..) |field_ty, i| {
if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) {
return false;
}
@@ -2251,7 +2270,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Struct) {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
return false;
}
@@ -2260,7 +2279,7 @@ pub const Value = extern union {
}
const elem_ty = ty.childType();
- for (a_field_vals) |a_elem, i| {
+ for (a_field_vals, 0..) |a_elem, i| {
const b_elem = b_field_vals[i];
if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) {
@@ -2507,7 +2526,7 @@ pub const Value = extern union {
.empty_struct_value => {},
.aggregate => {
const field_values = val.castTag(.aggregate).?.data;
- for (field_values) |field_val, i| {
+ for (field_values, 0..) |field_val, i| {
const field_ty = ty.structFieldType(i);
field_val.hash(field_ty, hasher, mod);
}
@@ -3209,7 +3228,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (int_ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, int_ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
@@ -3322,7 +3341,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3371,7 +3390,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3420,7 +3439,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3482,7 +3501,7 @@ pub const Value = extern union {
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3527,7 +3546,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3603,7 +3622,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
@@ -3642,7 +3661,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3678,7 +3697,7 @@ pub const Value = extern union {
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3709,7 +3728,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3745,7 +3764,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3781,7 +3800,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3822,7 +3841,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3863,7 +3882,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3939,7 +3958,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3986,7 +4005,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4033,7 +4052,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4070,7 +4089,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target);
@@ -4092,7 +4111,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
var bits_buf: Value.ElemValueBuffer = undefined;
@@ -4124,7 +4143,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4166,7 +4185,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4224,7 +4243,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4272,7 +4291,7 @@ pub const Value = extern union {
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4301,7 +4320,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4353,7 +4372,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4389,7 +4408,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4448,7 +4467,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4507,7 +4526,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4566,7 +4585,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4619,7 +4638,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4659,7 +4678,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4699,7 +4718,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4739,7 +4758,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4779,7 +4798,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4819,7 +4838,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4859,7 +4878,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4899,7 +4918,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4939,7 +4958,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4979,7 +4998,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5019,7 +5038,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5059,7 +5078,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5099,7 +5118,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5139,7 +5158,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5186,7 +5205,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var mulend1_buf: Value.ElemValueBuffer = undefined;
const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
var mulend2_buf: Value.ElemValueBuffer = undefined;