author     mlugg <mlugg@mlugg.co.uk>            2023-06-22 18:46:56 +0100
committer  Andrew Kelley <andrew@ziglang.org>   2023-06-24 16:56:39 -0700
commit     f26dda21171e26f44aeec8c59a75bbb3331eeb2e (patch)
tree       c935248861ae2693b314f2c8bc78fe38d9961b6d /src/link/Coff/Relocation.zig
parent     447ca4e3fff021f471b748187b53f0a4744ad0bc (diff)
all: migrate code to new cast builtin syntax
Most of this migration was performed automatically with `zig fmt`. There were a few exceptions which I had to manually fix:

* `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten
* `@truncate`'s fixup is incorrect for vectors
* Test cases are not formatted, and their error locations change
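
For context, a minimal before/after sketch of the cast migration this commit applies, assuming the post-0.11 builtin syntax where casts infer their result type from `@as`. The `pageDelta` helper and its test are illustrative only and not part of the commit.

    // Illustrative only: the old builtins took the destination type as a first
    // argument; the new syntax infers it from the surrounding @as.
    const std = @import("std");

    fn pageDelta(source_vaddr: u64, target_vaddr: u64) u21 {
        // Old syntax (what `zig fmt` rewrites away):
        //   const source_page = @intCast(i32, source_vaddr >> 12);
        //   const target_page = @intCast(i32, target_vaddr >> 12);
        //   return @bitCast(u21, @intCast(i21, target_page - source_page));

        // New syntax, matching the page-delta computation in the diff below:
        const source_page = @as(i32, @intCast(source_vaddr >> 12));
        const target_page = @as(i32, @intCast(target_vaddr >> 12));
        return @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
    }

    test "pageDelta" {
        try std.testing.expectEqual(@as(u21, 1), pageDelta(0x1000, 0x2000));
    }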
Diffstat (limited to 'src/link/Coff/Relocation.zig')
-rw-r--r--  src/link/Coff/Relocation.zig  24
1 file changed, 12 insertions, 12 deletions
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 10d4eed92b..ded7483667 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -126,23 +126,23 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
var buffer = ctx.code[self.offset..];
switch (self.type) {
.got_page, .import_page, .page => {
- const source_page = @intCast(i32, ctx.source_vaddr >> 12);
- const target_page = @intCast(i32, ctx.target_vaddr >> 12);
- const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+ const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
+ const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
+ const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), buffer[0..4]),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.got_pageoff, .import_pageoff, .pageoff => {
assert(!self.pcrel);
- const narrowed = @truncate(u12, @intCast(u64, ctx.target_vaddr));
+ const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
if (isArithmeticOp(buffer[0..4])) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -182,7 +182,7 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
2 => mem.writeIntLittle(
u32,
buffer[0..4],
- @truncate(u32, ctx.target_vaddr + ctx.image_base),
+ @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)),
),
3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
else => unreachable,
@@ -206,17 +206,17 @@ fn resolveX86(self: Relocation, ctx: Context) void {
.got, .import => {
assert(self.pcrel);
- const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+ const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
mem.writeIntLittle(i32, buffer[0..4], disp);
},
.direct => {
if (self.pcrel) {
- const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+ const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
mem.writeIntLittle(i32, buffer[0..4], disp);
} else switch (ctx.ptr_width) {
- .p32 => mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, ctx.target_vaddr + ctx.image_base)),
+ .p32 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base))),
.p64 => switch (self.length) {
- 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, ctx.target_vaddr + ctx.image_base)),
+ 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base))),
3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
else => unreachable,
},
@@ -226,6 +226,6 @@ fn resolveX86(self: Relocation, ctx: Context) void {
}
inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @truncate(u5, inst[3]);
+ const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}