| field | value | date |
|---|---|---|
| author | Andrew Kelley <andrew@ziglang.org> | 2023-02-18 09:02:57 -0700 |
| committer | Andrew Kelley <andrew@ziglang.org> | 2023-02-18 19:17:21 -0700 |
| commit | aeaef8c0ffadab4145fd002f2edd87a6db66ebd1 (patch) | |
| tree | e4c76c76173e5e72bc1947e1886662c4c6b2ba3c /src/link/MachO/UnwindInfo.zig | |
| parent | f0530385b57218ef323747bdb7438330a07d25cc (diff) | |
| download | zig-aeaef8c0ffadab4145fd002f2edd87a6db66ebd1.tar.gz zig-aeaef8c0ffadab4145fd002f2edd87a6db66ebd1.zip | |
update std lib and compiler sources to new for loop syntax
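The change is mechanical: as of this syntax update, Zig for loops no longer capture an implicit index as a second payload, so a loop that needs one must iterate the slice and an explicit counting range (`0..`) in lockstep. A minimal sketch of the old and new forms, using a hypothetical `items` array for illustration:

```zig
const std = @import("std");

pub fn main() void {
    const items = [_]u8{ 10, 20, 30 };

    // Old syntax: the index was an optional second capture.
    // for (items) |item, i| { ... }

    // New syntax: the loop zips the slice with the unbounded
    // counting range `0..`; `i` is captured from the range.
    for (items, 0..) |item, i| {
        std.debug.print("{d}: {d}\n", .{ i, item });
    }
}
```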
Diffstat (limited to 'src/link/MachO/UnwindInfo.zig')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/link/MachO/UnwindInfo.zig | 16 |

1 file changed, 8 insertions(+), 8 deletions(-)
```diff
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index 5e61834bbc..c64e617a35 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -126,7 +126,7 @@ const Page = struct {
             ctx.page.start + ctx.page.count,
         });
         try writer.print("  encodings (count = {d})\n", .{ctx.page.page_encodings_count});
-        for (ctx.page.page_encodings[0..ctx.page.page_encodings_count]) |record_id, i| {
+        for (ctx.page.page_encodings[0..ctx.page.page_encodings_count], 0..) |record_id, i| {
             const record = ctx.info.records.items[record_id];
             const enc = record.compactUnwindEncoding;
             try writer.print("    {d}: 0x{x:0>8}\n", .{ ctx.info.common_encodings_count + i, enc });
@@ -205,7 +205,7 @@ pub fn scanRelocs(zld: *Zld) !void {
     if (zld.getSectionByName("__TEXT", "__unwind_info") == null) return;
 
     const cpu_arch = zld.options.target.cpu.arch;
-    for (zld.objects.items) |*object, object_id| {
+    for (zld.objects.items, 0..) |*object, object_id| {
         const unwind_records = object.getUnwindRecords();
         for (object.exec_atoms.items) |atom_index| {
             const record_id = object.unwind_records_lookup.get(atom_index) orelse continue;
@@ -244,7 +244,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
     defer atom_indexes.deinit();
 
     // TODO handle dead stripping
-    for (zld.objects.items) |*object, object_id| {
+    for (zld.objects.items, 0..) |*object, object_id| {
         log.debug("collecting unwind records in {s} ({d})", .{ object.name, object_id });
         const unwind_records = object.getUnwindRecords();
 
@@ -335,7 +335,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
     try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
 
     var maybe_prev: ?macho.compact_unwind_entry = null;
-    for (records.items) |record, i| {
+    for (records.items, 0..) |record, i| {
         const record_id = blk: {
             if (maybe_prev) |prev| {
                 const is_dwarf = UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
@@ -483,7 +483,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
 
     // Save indices of records requiring LSDA relocation
     try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
-    for (info.records.items) |rec, i| {
+    for (info.records.items, 0..) |rec, i| {
         info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
         if (rec.lsda == 0) continue;
         try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
@@ -556,7 +556,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
     const cpu_arch = zld.options.target.cpu.arch;
 
     log.debug("Personalities:", .{});
-    for (info.personalities[0..info.personalities_count]) |target, i| {
+    for (info.personalities[0..info.personalities_count], 0..) |target, i| {
         const atom_index = zld.getGotAtomIndexForSymbol(target).?;
         const atom = zld.getAtom(atom_index);
         const sym = zld.getSymbol(atom.getSymbolWithLoc());
@@ -581,7 +581,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
         }
     }
 
-    for (info.records.items) |record, i| {
+    for (info.records.items, 0..) |record, i| {
         log.debug("Unwind record at offset 0x{x}", .{i * @sizeOf(macho.compact_unwind_entry)});
         log.debug("  start: 0x{x}", .{record.rangeStart});
         log.debug("  length: 0x{x}", .{record.rangeLength});
@@ -621,7 +621,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
     const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
     const lsda_base_offset = @intCast(u32, pages_base_offset -
         (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
-    for (info.pages.items) |page, i| {
+    for (info.pages.items, 0..) |page, i| {
         assert(page.count > 0);
         const first_entry = info.records.items[page.start];
         try writer.writeStruct(macho.unwind_info_section_header_index_entry{
```
