commit    f26dda21171e26f44aeec8c59a75bbb3331eeb2e
tree      c935248861ae2693b314f2c8bc78fe38d9961b6d
parent    447ca4e3fff021f471b748187b53f0a4744ad0bc
author    mlugg <mlugg@mlugg.co.uk>          2023-06-22 18:46:56 +0100
committer Andrew Kelley <andrew@ziglang.org> 2023-06-24 16:56:39 -0700
all: migrate code to new cast builtin syntax
Most of this migration was performed automatically with `zig fmt`. There
were a few exceptions which I had to manually fix:

* `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten
* `@truncate`'s fixup is incorrect for vectors
* Test cases are not formatted, and their error locations change
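For readers unfamiliar with the change: under the new syntax, cast builtins
such as `@intCast` take only the operand and infer the result type from
context, with `@as` supplying that type explicitly where no context exists.
A minimal standalone sketch (assuming Zig 0.11 semantics, not part of this
commit) comparing the two forms:

const std = @import("std");

pub fn main() void {
    const len: usize = 12345;

    // Old syntax: the destination type was the first argument.
    //   const small = @intCast(u32, len);

    // New syntax: the builtin takes only the operand; the result type
    // comes from context, supplied here explicitly via @as.
    const small = @as(u32, @intCast(len));

    std.debug.print("{d}\n", .{small});
}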
Diffstat (limited to 'src/link/Coff')
-rw-r--r--  src/link/Coff/ImportTable.zig   6
-rw-r--r--  src/link/Coff/Relocation.zig   24
2 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/src/link/Coff/ImportTable.zig b/src/link/Coff/ImportTable.zig
index c3ba77e855..c25851fe72 100644
--- a/src/link/Coff/ImportTable.zig
+++ b/src/link/Coff/ImportTable.zig
@@ -38,7 +38,7 @@ pub fn deinit(itab: *ImportTable, allocator: Allocator) void {
/// Size of the import table does not include the sentinel.
pub fn size(itab: ImportTable) u32 {
- return @intCast(u32, itab.entries.items.len) * @sizeOf(u64);
+ return @as(u32, @intCast(itab.entries.items.len)) * @sizeOf(u64);
}
pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc) !ImportIndex {
@@ -49,7 +49,7 @@ pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc
break :blk index;
} else {
log.debug(" (allocating import entry at index {d})", .{itab.entries.items.len});
- const index = @intCast(u32, itab.entries.items.len);
+ const index = @as(u32, @intCast(itab.entries.items.len));
_ = itab.entries.addOneAssumeCapacity();
break :blk index;
}
@@ -73,7 +73,7 @@ fn getBaseAddress(ctx: Context) u32 {
var addr = header.virtual_address;
for (ctx.coff_file.import_tables.values(), 0..) |other_itab, i| {
if (ctx.index == i) break;
- addr += @intCast(u32, other_itab.entries.items.len * @sizeOf(u64)) + 8;
+ addr += @as(u32, @intCast(other_itab.entries.items.len * @sizeOf(u64))) + 8;
}
return addr;
}
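The hunks above all follow the same mechanical rewrite: `@intCast(u32, x)`
becomes `@as(u32, @intCast(x))`, because inside a larger expression the bare
builtin has no result type to infer from. A simplified sketch of the `size`
pattern (hypothetical `sizeInBytes` helper and `len` parameter, not the
commit's code):

fn sizeInBytes(len: usize) u32 {
    // @intCast asserts (in safe builds) that `len` fits in u32; @as pins
    // the destination type before the multiplication is performed.
    return @as(u32, @intCast(len)) * @sizeOf(u64);
}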
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 10d4eed92b..ded7483667 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -126,23 +126,23 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
var buffer = ctx.code[self.offset..];
switch (self.type) {
.got_page, .import_page, .page => {
- const source_page = @intCast(i32, ctx.source_vaddr >> 12);
- const target_page = @intCast(i32, ctx.target_vaddr >> 12);
- const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+ const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
+ const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
+ const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), buffer[0..4]),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.got_pageoff, .import_pageoff, .pageoff => {
assert(!self.pcrel);
- const narrowed = @truncate(u12, @intCast(u64, ctx.target_vaddr));
+ const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
if (isArithmeticOp(buffer[0..4])) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -182,7 +182,7 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
2 => mem.writeIntLittle(
u32,
buffer[0..4],
- @truncate(u32, ctx.target_vaddr + ctx.image_base),
+ @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)),
),
3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
else => unreachable,
@@ -206,17 +206,17 @@ fn resolveX86(self: Relocation, ctx: Context) void {
.got, .import => {
assert(self.pcrel);
- const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+ const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
mem.writeIntLittle(i32, buffer[0..4], disp);
},
.direct => {
if (self.pcrel) {
- const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+ const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
mem.writeIntLittle(i32, buffer[0..4], disp);
} else switch (ctx.ptr_width) {
- .p32 => mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, ctx.target_vaddr + ctx.image_base)),
+ .p32 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base))),
.p64 => switch (self.length) {
- 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, ctx.target_vaddr + ctx.image_base)),
+ 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base))),
3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
else => unreachable,
},
@@ -226,6 +226,6 @@ fn resolveX86(self: Relocation, ctx: Context) void {
}
inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @truncate(u5, inst[3]);
+ const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
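A closing note on the semantics of the rewritten casts: `@truncate` discards
high bits, `@intCast` safety-checks that the value fits, and `@bitCast`
reinterprets the bit pattern at the same width. A standalone sketch
(assuming Zig 0.11 semantics, not part of the commit) mirroring the
page-relocation math in resolveAarch64 above:

const std = @import("std");

test "cast semantics used in resolveAarch64" {
    const delta: i32 = -8; // e.g. target_page - source_page

    // @intCast checks the value fits in i21; @bitCast then reinterprets
    // the same 21 bits as unsigned so they can be split into fields.
    const pages = @as(u21, @bitCast(@as(i21, @intCast(delta))));

    // @truncate keeps only the low bits of its operand.
    const immlo = @as(u2, @truncate(pages));
    const immhi = @as(u19, @truncate(pages >> 2));

    try std.testing.expectEqual(@as(u2, 0), immlo);
    try std.testing.expectEqual(@as(u19, 0x7FFFE), immhi);
}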