Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig                52
-rw-r--r--  src/codegen/llvm.zig             60
-rw-r--r--  src/codegen/spirv.zig             4
-rw-r--r--  src/codegen/spirv/Assembler.zig   2
-rw-r--r--  src/codegen/spirv/Module.zig      4
-rw-r--r--  src/codegen/spirv/Section.zig     2
-rw-r--r--  src/codegen/spirv/type.zig        2
7 files changed, 63 insertions(+), 63 deletions(-)
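Every hunk in the diff below is the same mechanical migration: Zig's counted `for` loop dropped the implicit index capture, so `for (items) |item, i|` becomes `for (items, 0..) |item, i|`, with the explicit `0..` range operand supplying the index. A minimal sketch of the new form (illustrative names and values, not from this commit):

const std = @import("std");

pub fn main() void {
    const items = [_]u8{ 10, 20, 30 };
    // The `0..` range is iterated in lockstep with `items`,
    // so `i` counts 0, 1, 2 while `item` walks the elements.
    for (items, 0..) |item, i| {
        std.debug.print("items[{d}] = {d}\n", .{ i, item });
    }
}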
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 2f721e1b4b..0beb00b236 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -253,7 +253,7 @@ fn formatIdent(
     if (solo and isReservedIdent(ident)) {
         try writer.writeAll("zig_e_");
     }
-    for (ident) |c, i| {
+    for (ident, 0..) |c, i| {
         switch (c) {
             'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
             '.' => try writer.writeByte('_'),
@@ -361,7 +361,7 @@ pub const Function = struct {
         _ = mutability;

         if (f.getFreeLocals().getPtrContext(ty, f.tyHashCtx())) |locals_list| {
-            for (locals_list.items) |local_index, i| {
+            for (locals_list.items, 0..) |local_index, i| {
                 const local = &f.locals.items[local_index];
                 if (local.alignment >= alignment) {
                     local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1);
@@ -1283,7 +1283,7 @@ pub const DeclGen = struct {
                     try writer.writeByte('{');

                     var empty = true;
-                    for (field_vals) |field_val, field_index| {
+                    for (field_vals, 0..) |field_val, field_index| {
                         const field_ty = ty.structFieldType(field_index);
                         if (!field_ty.hasRuntimeBits()) continue;
@@ -1309,7 +1309,7 @@ pub const DeclGen = struct {
                     const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);

                     var eff_num_fields: usize = 0;
-                    for (field_vals) |_, index| {
+                    for (field_vals, 0..) |_, index| {
                         const field_ty = ty.structFieldType(index);
                         if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1331,7 +1331,7 @@ pub const DeclGen = struct {
                         var eff_index: usize = 0;
                         var needs_closing_paren = false;
-                        for (field_vals) |field_val, index| {
+                        for (field_vals, 0..) |field_val, index| {
                             const field_ty = ty.structFieldType(index);
                             if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1359,7 +1359,7 @@ pub const DeclGen = struct {
                         try writer.writeByte('(');
                         // a << a_off | b << b_off | c << c_off
                         var empty = true;
-                        for (field_vals) |field_val, index| {
+                        for (field_vals, 0..) |field_val, index| {
                             const field_ty = ty.structFieldType(index);
                             if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1719,7 +1719,7 @@ pub const DeclGen = struct {
             {
                 const fields = t.tupleFields();
                 var field_id: usize = 0;
-                for (fields.types) |field_ty, i| {
+                for (fields.types, 0..) |field_ty, i| {
                     if (!field_ty.hasRuntimeBits() or fields.values[i].tag() != .unreachable_value) continue;

                     try buffer.append(' ');
@@ -2130,7 +2130,7 @@ pub const DeclGen = struct {
             try tuple_storage.ensureTotalCapacity(allocator, t.structFieldCount());

             const fields = t.tupleFields();
-            for (fields.values) |value, index|
+            for (fields.values, 0..) |value, index|
                 if (value.tag() == .unreachable_value)
                     tuple_storage.appendAssumeCapacity(.{
                         .type = fields.types[index],
@@ -2415,7 +2415,7 @@ pub const DeclGen = struct {
         const name_end = buffer.items.len - "(".len;
         try dg.renderTypeAndName(bw, enum_ty, .{ .identifier = "tag" }, .Const, 0, .Complete);
         try buffer.appendSlice(") {\n switch (tag) {\n");
-        for (enum_ty.enumFields().keys()) |name, index| {
+        for (enum_ty.enumFields().keys(), 0..) |name, index| {
             const name_z = try dg.typedefs.allocator.dupeZ(u8, name);
             defer dg.typedefs.allocator.free(name_z);
             const name_bytes = name_z[0 .. name_z.len + 1];
@@ -2681,7 +2681,7 @@ pub fn genErrDecls(o: *Object) !void {
     try writer.writeAll("enum {\n");
     o.indent_writer.pushIndent();
     var max_name_len: usize = 0;
-    for (o.dg.module.error_name_list.items) |name, value| {
+    for (o.dg.module.error_name_list.items, 0..) |name, value| {
        max_name_len = std.math.max(name.len, max_name_len);
        var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
        try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
@@ -2724,7 +2724,7 @@ pub fn genErrDecls(o: *Object) !void {
     try writer.writeAll("static ");
     try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .Const, 0, .Complete);
     try writer.writeAll(" = {");
-    for (o.dg.module.error_name_list.items) |name, value| {
+    for (o.dg.module.error_name_list.items, 0..) |name, value| {
         if (value != 0) try writer.writeByte(',');

         var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
@@ -2742,7 +2742,7 @@ fn genExports(o: *Object) !void {
     defer tracy.end();

     const fwd_decl_writer = o.dg.fwd_decl.writer();
-    if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..]) |@"export", i| {
+    if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..], 0..) |@"export", i| {
         try fwd_decl_writer.writeAll("zig_export(");
         try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, 1 + i));
         try fwd_decl_writer.print(", {s}, {s});\n", .{
@@ -2800,7 +2800,7 @@ pub fn genFunc(f: *Function) !void {
     // alignment, descending.
     const free_locals = f.getFreeLocals();
     const values = f.allocs.values();
-    for (f.allocs.keys()) |local_index, i| {
+    for (f.allocs.keys(), 0..) |local_index, i| {
         if (values[i]) continue; // static
         const local = f.locals.items[local_index];
         log.debug("inserting local {d} into free_locals", .{local_index});
@@ -4238,7 +4238,7 @@ fn airCall(
     const resolved_args = try gpa.alloc(CValue, args.len);
     defer gpa.free(resolved_args);
-    for (args) |arg, i| {
+    for (args, 0..) |arg, i| {
         resolved_args[i] = try f.resolveInst(arg);
     }
@@ -4303,7 +4303,7 @@ fn airCall(
     try writer.writeByte('(');
     var args_written: usize = 0;
-    for (args) |arg, arg_i| {
+    for (args, 0..) |arg, arg_i| {
         const ty = f.air.typeOf(arg);
         if (!ty.hasRuntimeBitsIgnoreComptime()) continue;
         if (args_written != 0) {
@@ -5043,7 +5043,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
     extra_i = constraints_extra_begin;
     var locals_index = locals_begin;
     try writer.writeByte(':');
-    for (outputs) |output, index| {
+    for (outputs, 0..) |output, index| {
         const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
         const constraint = std.mem.sliceTo(extra_bytes, 0);
         const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5067,7 +5067,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
         try writer.writeByte(')');
     }
     try writer.writeByte(':');
-    for (inputs) |input, index| {
+    for (inputs, 0..) |input, index| {
         const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
         const constraint = std.mem.sliceTo(extra_bytes, 0);
         const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5426,7 +5426,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
     };
     const field_loc = switch (struct_ty.tag()) {
         .@"struct" => switch (struct_ty.containerLayout()) {
-            .Auto, .Extern => for (struct_ty.structFields().values()[index..]) |field, offset| {
+            .Auto, .Extern => for (struct_ty.structFields().values()[index..], 0..) |field, offset| {
                 if (field.ty.hasRuntimeBitsIgnoreComptime()) break FieldLoc{ .field = .{
                     .identifier = struct_ty.structFieldName(index + offset),
                 } };
@@ -5469,7 +5469,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
             if (tuple.values[index].tag() != .unreachable_value) return CValue.none;

             var id: usize = 0;
-            break :field_name for (tuple.values) |value, i| {
+            break :field_name for (tuple.values, 0..) |value, i| {
                 if (value.tag() != .unreachable_value) continue;
                 if (!tuple.types[i].hasRuntimeBitsIgnoreComptime()) continue;
                 if (i >= index) break FieldLoc{ .field = .{ .field = id } };
@@ -6687,7 +6687,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const gpa = f.object.dg.gpa;
     const resolved_elements = try gpa.alloc(CValue, elements.len);
     defer gpa.free(resolved_elements);
-    for (elements) |element, i| {
+    for (elements, 0..) |element, i| {
         resolved_elements[i] = try f.resolveInst(element);
     }
     {
@@ -6706,7 +6706,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     switch (inst_ty.zigTypeTag()) {
         .Array, .Vector => {
             const elem_ty = inst_ty.childType();
-            for (resolved_elements) |element, i| {
+            for (resolved_elements, 0..) |element, i| {
                 try f.writeCValue(writer, local, .Other);
                 try writer.print("[{d}] = ", .{i});
                 try f.writeCValue(writer, element, .Other);
@@ -6727,7 +6727,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try writer.writeAll(")");
                 try writer.writeByte('{');
                 var empty = true;
-                for (elements) |element, index| {
+                for (elements, 0..) |element, index| {
                     if (inst_ty.structFieldValueComptime(index)) |_| continue;

                     if (!empty) try writer.writeAll(", ");
@@ -6746,7 +6746,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try writer.writeAll("};\n");

                 var field_id: usize = 0;
-                for (elements) |element, index| {
+                for (elements, 0..) |element, index| {
                     if (inst_ty.structFieldValueComptime(index)) |_| continue;

                     const element_ty = f.air.typeOf(element);
@@ -6784,7 +6784,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);

                 var empty = true;
-                for (elements) |_, index| {
+                for (elements, 0..) |_, index| {
                     const field_ty = inst_ty.structFieldType(index);
                     if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -6796,7 +6796,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                     empty = false;
                 }
                 empty = true;
-                for (resolved_elements) |element, index| {
+                for (resolved_elements, 0..) |element, index| {
                     const field_ty = inst_ty.structFieldType(index);
                     if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -7608,7 +7608,7 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
 }

 fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void {
-    for (f.locals.items[pre_locals_len..]) |*local, local_offset| {
+    for (f.locals.items[pre_locals_len..], 0..) |*local, local_offset| {
         const local_index = pre_locals_len + @intCast(LocalIndex, local_offset);
         if (f.allocs.contains(local_index)) continue; // allocs are not freeable
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 00f6c55171..21c57b5e13 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -605,7 +605,7 @@ pub const Object = struct {
         defer mod.gpa.free(llvm_errors);

         llvm_errors[0] = llvm_slice_ty.getUndef();
-        for (llvm_errors[1..]) |*llvm_error, i| {
+        for (llvm_errors[1..], 0..) |*llvm_error, i| {
             const name = error_name_list[1..][i];
             const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
             const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
@@ -696,7 +696,7 @@ pub const Object = struct {
         object.extern_collisions.clearRetainingCapacity();

         const export_keys = mod.decl_exports.keys();
-        for (mod.decl_exports.values()) |export_list, i| {
+        for (mod.decl_exports.values(), 0..) |export_list, i| {
             const decl_index = export_keys[i];
             const llvm_global = object.decl_map.get(decl_index) orelse continue;
             for (export_list.items) |exp| {
@@ -1081,7 +1081,7 @@ pub const Object = struct {
                     const param_alignment = param_ty.abiAlignment(target);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
                     const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
-                    for (field_types) |_, field_i_usize| {
+                    for (field_types, 0..) |_, field_i_usize| {
                         const field_i = @intCast(c_uint, field_i_usize);
                         const param = llvm_func.getParam(llvm_arg_i);
                         llvm_arg_i += 1;
@@ -1500,7 +1500,7 @@ pub const Object = struct {
         const int_info = ty.intInfo(target);
         assert(int_info.bits != 0);

-        for (field_names) |field_name, i| {
+        for (field_names, 0..) |field_name, i| {
             const field_name_z = try gpa.dupeZ(u8, field_name);
             defer gpa.free(field_name_z);
@@ -1997,7 +1997,7 @@ pub const Object = struct {
             comptime assert(struct_layout_version == 2);
             var offset: u64 = 0;

-            for (tuple.types) |field_ty, i| {
+            for (tuple.types, 0..) |field_ty, i| {
                 const field_val = tuple.values[i];
                 if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -2926,7 +2926,7 @@ pub const DeclGen = struct {
                 var offset: u64 = 0;
                 var big_align: u32 = 0;

-                for (tuple.types) |field_ty, i| {
+                for (tuple.types, 0..) |field_ty, i| {
                     const field_val = tuple.values[i];
                     if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -3437,7 +3437,7 @@ pub const DeclGen = struct {
                 const llvm_elems = try gpa.alloc(*llvm.Value, len);
                 defer gpa.free(llvm_elems);
                 var need_unnamed = false;
-                for (elem_vals[0..len]) |elem_val, i| {
+                for (elem_vals[0..len], 0..) |elem_val, i| {
                     llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
                     need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
                 }
@@ -3623,7 +3623,7 @@ pub const DeclGen = struct {
                 var big_align: u32 = 0;
                 var need_unnamed = false;

-                for (tuple.types) |field_ty, i| {
+                for (tuple.types, 0..) |field_ty, i| {
                     if (tuple.values[i].tag() != .unreachable_value) continue;
                     if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3685,7 +3685,7 @@ pub const DeclGen = struct {
                 comptime assert(Type.packed_struct_layout_version == 2);
                 var running_int: *llvm.Value = int_llvm_ty.constNull();
                 var running_bits: u16 = 0;
-                for (field_vals) |field_val, i| {
+                for (field_vals, 0..) |field_val, i| {
                     const field = fields[i];
                     if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3860,7 +3860,7 @@ pub const DeclGen = struct {
                     const elem_ty = tv.ty.elemType();
                     const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
                     defer dg.gpa.free(llvm_elems);
-                    for (llvm_elems) |*elem, i| {
+                    for (llvm_elems, 0..) |*elem, i| {
                         var byte_payload: Value.Payload.U64 = .{
                             .base = .{ .tag = .int_u64 },
                             .data = bytes[i],
@@ -3885,7 +3885,7 @@ pub const DeclGen = struct {
                     const elem_ty = tv.ty.elemType();
                     const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
                     defer dg.gpa.free(llvm_elems);
-                    for (llvm_elems) |*elem, i| {
+                    for (llvm_elems, 0..) |*elem, i| {
                         elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
                     }
                     return llvm.constVector(
@@ -3918,7 +3918,7 @@ pub const DeclGen = struct {
                     const elem_ty = tv.ty.elemType();
                     const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
                     defer dg.gpa.free(llvm_elems);
-                    for (llvm_elems) |*elem, i| {
+                    for (llvm_elems, 0..) |*elem, i| {
                         var byte_payload: Value.Payload.U64 = .{
                             .base = .{ .tag = .int_u64 },
                             .data = bytes[i],
@@ -4484,7 +4484,7 @@ pub const FuncGen = struct {
     fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
         const air_tags = self.air.instructions.items(.tag);
-        for (body) |inst, i| {
+        for (body, 0..) |inst, i| {
             const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
                 // zig fmt: off
                 .add => try self.airAdd(inst, false),
@@ -4857,7 +4857,7 @@ pub const FuncGen = struct {
                 const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
                 try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
-                for (llvm_types) |field_ty, i_usize| {
+                for (llvm_types, 0..) |field_ty, i_usize| {
                     const i = @intCast(c_uint, i_usize);
                     const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
                     const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
@@ -6255,7 +6255,7 @@ pub const FuncGen = struct {
         var name_map: std.StringArrayHashMapUnmanaged(u16) = .{};
         try name_map.ensureUnusedCapacity(arena, max_param_count);

-        for (outputs) |output, i| {
+        for (outputs, 0..) |output, i| {
             const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
             const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
             const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -6440,7 +6440,7 @@ pub const FuncGen = struct {
         var name_start: usize = undefined;
         var modifier_start: usize = undefined;
-        for (asm_source) |byte, i| {
+        for (asm_source, 0..) |byte, i| {
             switch (state) {
                 .start => switch (byte) {
                     '%' => state = .percent,
@@ -6531,7 +6531,7 @@ pub const FuncGen = struct {
             .Auto,
             "",
         );
-        for (llvm_param_attrs[0..param_count]) |llvm_elem_ty, i| {
+        for (llvm_param_attrs[0..param_count], 0..) |llvm_elem_ty, i| {
             if (llvm_elem_ty) |llvm_ty| {
                 llvm.setCallElemTypeAttr(call, i, llvm_ty);
             }
@@ -6539,7 +6539,7 @@ pub const FuncGen = struct {
         var ret_val = call;
         llvm_ret_i = 0;
-        for (outputs) |output, i| {
+        for (outputs, 0..) |output, i| {
             if (llvm_ret_indirect[i]) continue;

             const output_value = if (return_count > 1) b: {
@@ -7421,7 +7421,7 @@ pub const FuncGen = struct {
             const index_i32 = llvm_i32.constInt(i, .False);

             var args: [3]*llvm.Value = undefined;
-            for (args_vectors) |arg_vector, k| {
+            for (args_vectors, 0..) |arg_vector, k| {
                 args[k] = self.builder.buildExtractElement(arg_vector, index_i32, "");
             }
             const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, "");
@@ -8790,7 +8790,7 @@ pub const FuncGen = struct {
         const tag_int_value = fn_val.getParam(0);
         const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));

-        for (fields.keys()) |_, field_index| {
+        for (fields.keys(), 0..) |_, field_index| {
             const this_tag_int_value = int: {
                 var tag_val_payload: Value.Payload.U32 = .{
                     .base = .{ .tag = .enum_field_index },
@@ -8879,7 +8879,7 @@ pub const FuncGen = struct {
             usize_llvm_ty.constNull(),
             usize_llvm_ty.constNull(),
         };
-        for (fields.keys()) |name, field_index| {
+        for (fields.keys(), 0..) |name, field_index| {
             const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
             const str_init_llvm_ty = str_init.typeOf();
             const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -9003,7 +9003,7 @@ pub const FuncGen = struct {

         const llvm_i32 = self.context.intType(32);

-        for (values) |*val, i| {
+        for (values, 0..) |*val, i| {
             var buf: Value.ElemValueBuffer = undefined;
             const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
             if (elem.isUndef()) {
@@ -9185,7 +9185,7 @@ pub const FuncGen = struct {
             const llvm_u32 = self.context.intType(32);

             var vector = llvm_result_ty.getUndef();
-            for (elements) |elem, i| {
+            for (elements, 0..) |elem, i| {
                 const index_u32 = llvm_u32.constInt(i, .False);
                 const llvm_elem = try self.resolveInst(elem);
                 vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
@@ -9202,7 +9202,7 @@ pub const FuncGen = struct {
                 comptime assert(Type.packed_struct_layout_version == 2);
                 var running_int: *llvm.Value = int_llvm_ty.constNull();
                 var running_bits: u16 = 0;
-                for (elements) |elem, i| {
+                for (elements, 0..) |elem, i| {
                     const field = fields[i];
                     if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -9234,7 +9234,7 @@ pub const FuncGen = struct {
                 const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));

                 var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
-                for (elements) |elem, i| {
+                for (elements, 0..) |elem, i| {
                     if (result_ty.structFieldValueComptime(i) != null) continue;

                     const llvm_elem = try self.resolveInst(elem);
@@ -9255,7 +9255,7 @@ pub const FuncGen = struct {
                 return alloca_inst;
             } else {
                 var result = llvm_result_ty.getUndef();
-                for (elements) |elem, i| {
+                for (elements, 0..) |elem, i| {
                     if (result_ty.structFieldValueComptime(i) != null) continue;

                     const llvm_elem = try self.resolveInst(elem);
@@ -9280,7 +9280,7 @@ pub const FuncGen = struct {
                 };
                 const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base);

-                for (elements) |elem, i| {
+                for (elements, 0..) |elem, i| {
                     const indices: [2]*llvm.Value = .{
                         llvm_usize.constNull(),
                         llvm_usize.constInt(@intCast(c_uint, i), .False),
@@ -9919,7 +9919,7 @@ pub const FuncGen = struct {
         };
         const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
         const zero = usize_llvm_ty.constInt(0, .False);
-        for (array_elements) |elem, i| {
+        for (array_elements, 0..) |elem, i| {
             const indexes = [_]*llvm.Value{
                 zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
             };
@@ -10341,7 +10341,7 @@ fn llvmFieldIndex(
     if (ty.isSimpleTupleOrAnonStruct()) {
         const tuple = ty.tupleFields();
         var llvm_field_index: c_uint = 0;
-        for (tuple.types) |field_ty, i| {
+        for (tuple.types, 0..) |field_ty, i| {
             if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;

             const field_align = field_ty.abiAlignment(target);
@@ -10952,7 +10952,7 @@ fn isByRef(ty: Type) bool {
     if (ty.isSimpleTupleOrAnonStruct()) {
         const tuple = ty.tupleFields();
         var count: usize = 0;
-        for (tuple.values) |field_val, i| {
+        for (tuple.values, 0..) |field_val, i| {
             if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;

             count += 1;
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index c5a3d57d07..5f27c14e95 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -418,7 +418,7 @@ pub const DeclGen = struct {
             const elem_refs = try self.gpa.alloc(IdRef, vector_len);
             defer self.gpa.free(elem_refs);
-            for (elem_refs) |*elem, i| {
+            for (elem_refs, 0..) |*elem, i| {
                 elem.* = try self.genConstant(elem_ty, elem_vals[i]);
             }
             try section.emit(self.spv.gpa, .OpConstantComposite, .{
@@ -498,7 +498,7 @@ pub const DeclGen = struct {
             return self.fail("VarArgs functions are unsupported for SPIR-V", .{});

         const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
-        for (param_types) |*param, i| {
+        for (param_types, 0..) |*param, i| {
             param.* = try self.resolveType(ty.fnParamType(i));
         }
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index fc4ab406b9..6e77818fa5 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -392,7 +392,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
         .OpTypeFunction => blk: {
             const param_operands = operands[2..];
             const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
-            for (param_types) |*param, i| {
+            for (param_types, 0..) |*param, i| {
                 param.* = try self.resolveTypeRef(param_operands[i].ref_id);
             }
             const payload = try self.spv.arena.create(SpvType.Payload.Function);
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index f37b04bff3..3562e87be4 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -161,7 +161,7 @@ pub fn flush(self: Module, file: std.fs.File) !void {
     var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
     var file_size: u64 = 0;
-    for (iovc_buffers) |*iovc, i| {
+    for (&iovc_buffers, 0..) |*iovc, i| {
         // Note, since spir-v supports both little and big endian we can ignore byte order here and
         // just treat the words as a sequence of bytes.
         const bytes = std.mem.sliceAsBytes(buffers[i]);
@@ -389,7 +389,7 @@ fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct
     // Decorations for the struct members.
     const extra = info.member_decoration_extra;
     var extra_i: u32 = 0;
-    for (info.members) |member, i| {
+    for (info.members, 0..) |member, i| {
         const d = member.decorations;
         const index = @intCast(Word, i);
         switch (d.matrix_layout) {
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index 83f594dcef..a76314f5fa 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -195,7 +195,7 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe

 fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
     var mask: Word = 0;
-    inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
+    inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
         switch (@typeInfo(field.type)) {
             .Optional => if (@field(operand, field.name) != null) {
                 mask |= 1 << @intCast(u5, bit);
diff --git a/src/codegen/spirv/type.zig b/src/codegen/spirv/type.zig
index 6cc1b8f3bd..dc993b62ff 100644
--- a/src/codegen/spirv/type.zig
+++ b/src/codegen/spirv/type.zig
@@ -98,7 +98,7 @@ pub const Type = extern union {
                 const struct_b = b.payload(.@"struct");
                 if (struct_a.members.len != struct_b.members.len) return false;
-                for (struct_a.members) |mem_a, i| {
+                for (struct_a.members, 0..) |mem_a, i| {
                     if (!std.meta.eql(mem_a, struct_b.members[i])) return false;
                 }
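One hunk above is not purely mechanical: the `src/codegen/spirv/Module.zig` flush loop also changes its operand from `iovc_buffers` to `&iovc_buffers`, since the multi-object `for` only yields mutable element pointers for a `|*iovc|` capture when it iterates a pointer to the array rather than the array value. A minimal sketch of that pattern (illustrative, not from this commit):

const std = @import("std");

pub fn main() void {
    var buf: [3]usize = .{ 1, 2, 3 };
    // Iterating `&buf` (a *[3]usize) lets `|*elem|` point into `buf` itself;
    // iterating `buf` by value would not allow mutation through the capture.
    for (&buf, 0..) |*elem, i| {
        elem.* += i;
    }
    std.debug.print("{any}\n", .{buf}); // { 1, 3, 5 }
}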
