| author | Andrew Kelley <andrew@ziglang.org> | 2023-06-24 16:58:19 -0700 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-06-24 16:58:19 -0700 |
| commit | 146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch) | |
| tree | 67e3db8b444d65c667e314770fc983a7fc8ba293 /src/arch/sparc64/CodeGen.zig | |
| parent | 13853bef0df3c90633021850cc6d6abaeea03282 (diff) | |
| parent | 21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff) | |
| download | zig-146b79af153bbd5dafda0ba12a040385c7fc58f8.tar.gz zig-146b79af153bbd5dafda0ba12a040385c7fc58f8.zip | |
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
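As an illustration of what the new result-type inference means at a call site, here is a minimal sketch (the `truncateLen` function and the values are hypothetical, not taken from this commit):

```zig
const std = @import("std");

fn truncateLen(len: usize) u32 {
    // Old style: the destination type was the first argument of the builtin,
    // e.g. `@intCast(u32, len)`.
    // New style: the cast builtin takes a single operand and infers its
    // destination type from the result location; here, the function's
    // return type of u32.
    return @intCast(len);
}

test "cast builtins infer their destination type" {
    // Where no result type is available, @as supplies one explicitly.
    // That is the pattern this merge applies throughout CodeGen.zig.
    const x = @as(u32, @intCast(@as(u64, 1234)));
    try std.testing.expectEqual(x, truncateLen(1234));
}
```

In the diff below, each `@intCast(T, x)`, `@truncate(T, x)`, and `@ptrCast(T, x)` call becomes `@as(T, @intCast(x))`, `@as(T, @truncate(x))`, or `@as(T, @ptrCast(x))`, since these call sites have no surrounding result type to infer from.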
Diffstat (limited to 'src/arch/sparc64/CodeGen.zig')

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/arch/sparc64/CodeGen.zig | 86 |

1 file changed, 43 insertions, 43 deletions
```diff
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index f210f8e144..9975cda5cb 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -415,7 +415,7 @@ fn gen(self: *Self) !void {
             .branch_predict_int = .{
                 .ccr = .xcc,
                 .cond = .al,
-                .inst = @intCast(u32, self.mir_instructions.len),
+                .inst = @as(u32, @intCast(self.mir_instructions.len)),
             },
         },
     });
@@ -840,7 +840,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = res: {
         if (self.liveness.isUnused(inst)) break :res MCValue.dead;
         return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
@@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const ptr_ty = self.typeOf(ty_op.operand);
     const ptr = try self.resolveInst(ty_op.operand);
     const array_ty = ptr_ty.childType(mod);
-    const array_len = @intCast(u32, array_ty.arrayLen(mod));
+    const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
     const ptr_bits = self.target.ptrBitWidth();
     const ptr_bytes = @divExact(ptr_bits, 8);
 
@@ -893,11 +893,11 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
     const is_volatile = (extra.data.flags & 0x80000000) != 0;
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.inputs_len]));
     extra_i += inputs.len;
 
     const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -1237,13 +1237,13 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
         switch (operand) {
             .immediate => |imm| {
                 const swapped = switch (int_info.bits) {
-                    16 => @byteSwap(@intCast(u16, imm)),
-                    24 => @byteSwap(@intCast(u24, imm)),
-                    32 => @byteSwap(@intCast(u32, imm)),
-                    40 => @byteSwap(@intCast(u40, imm)),
-                    48 => @byteSwap(@intCast(u48, imm)),
-                    56 => @byteSwap(@intCast(u56, imm)),
-                    64 => @byteSwap(@intCast(u64, imm)),
+                    16 => @byteSwap(@as(u16, @intCast(imm))),
+                    24 => @byteSwap(@as(u24, @intCast(imm))),
+                    32 => @byteSwap(@as(u32, @intCast(imm))),
+                    40 => @byteSwap(@as(u40, @intCast(imm))),
+                    48 => @byteSwap(@as(u48, @intCast(imm))),
+                    56 => @byteSwap(@as(u56, @intCast(imm))),
+                    64 => @byteSwap(@as(u64, @intCast(imm))),
                     else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}),
                 };
                 break :result .{ .immediate = swapped };
@@ -1295,7 +1295,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
     const ty = self.typeOf(callee);
     const mod = self.bin_file.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -1348,7 +1348,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
-                break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
+                break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
             } else unreachable;
 
             try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });
@@ -1515,7 +1515,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @intFromEnum(pl_op.operand);
         if (op_int >= Air.ref_start_index) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+            const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
            self.processDeath(op_index);
         }
     }
@@ -1851,7 +1851,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const loop = self.air.extraData(Air.Block, ty_pl.payload);
     const body = self.air.extra[loop.end .. loop.end + loop.data.body_len];
-    const start = @intCast(u32, self.mir_instructions.len);
+    const start = @as(u32, @intCast(self.mir_instructions.len));
     try self.genBody(body);
     try self.jump(start);
 
@@ -2574,7 +2574,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -2772,7 +2772,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
     const gpa = self.gpa;
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
-    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+    const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
     self.mir_instructions.appendAssumeCapacity(inst);
     return result_index;
 }
@@ -3207,7 +3207,7 @@ fn binOpImmediate(
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) },
             },
         },
         .sll,
@@ -3218,7 +3218,7 @@
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u5, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u5, @intCast(rhs.immediate)) },
             },
         },
         .sllx,
@@ -3229,14 +3229,14 @@
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u6, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u6, @intCast(rhs.immediate)) },
             },
         },
         .cmp => .{
             .arithmetic_2op = .{
                 .is_imm = true,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) },
             },
         },
         else => unreachable,
@@ -3535,7 +3535,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
         return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
     switch (error_union_mcv) {
         .register => return self.fail("TODO errUnionPayload for registers", .{}),
         .stack_offset => |off| {
@@ -3565,15 +3565,15 @@ fn finishAirBookkeeping(self: *Self) void {
 fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
     var tomb_bits = self.liveness.getTombBits(inst);
     for (operands) |op| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @intFromEnum(op);
         if (op_int < Air.ref_start_index) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+        const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
         self.processDeath(op_index);
     }
-    const is_used = @truncate(u1, tomb_bits) == 0;
+    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
     if (is_used) {
         log.debug("%{d} => {}", .{ inst, result });
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -3663,7 +3663,7 @@ fn genInlineMemcpy(
         .data = .{ .branch_predict_reg = .{
             .cond = .ne_zero,
             .rs1 = len,
-            .inst = @intCast(u32, self.mir_instructions.len - 2),
+            .inst = @as(u32, @intCast(self.mir_instructions.len - 2)),
         } },
     });
 
@@ -3838,7 +3838,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                     .arithmetic_2op = .{
                         .is_imm = true,
                         .rs1 = reg,
-                        .rs2_or_imm = .{ .imm = @truncate(u12, x) },
+                        .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) },
                     },
                 },
             });
@@ -3848,7 +3848,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                 .data = .{
                     .sethi = .{
                         .rd = reg,
-                        .imm = @truncate(u22, x >> 10),
+                        .imm = @as(u22, @truncate(x >> 10)),
                     },
                 },
             });
@@ -3860,12 +3860,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .is_imm = true,
                         .rd = reg,
                         .rs1 = reg,
-                        .rs2_or_imm = .{ .imm = @truncate(u10, x) },
+                        .rs2_or_imm = .{ .imm = @as(u10, @truncate(x)) },
                     },
                 },
             });
         } else if (x <= math.maxInt(u44)) {
-            try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) });
+            try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 12)) });
 
             _ = try self.addInst(.{
                 .tag = .sllx,
@@ -3886,7 +3886,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .is_imm = true,
                         .rd = reg,
                         .rs1 = reg,
-                        .rs2_or_imm = .{ .imm = @truncate(u12, x) },
+                        .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) },
                     },
                 },
             });
@@ -3894,8 +3894,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 
             // Need to allocate a temporary register to load 64-bit immediates.
             const tmp_reg = try self.register_manager.allocReg(null, gp);
-            try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) });
-            try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) });
+            try self.genSetReg(ty, tmp_reg, .{ .immediate = @as(u32, @truncate(x)) });
+            try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 32)) });
 
             _ = try self.addInst(.{
                 .tag = .sllx,
@@ -3994,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                     try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
                     const overflow_bit_ty = ty.structFieldType(1, mod);
-                    const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+                    const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
                     const cond_reg = try self.register_manager.allocReg(null, gp);
 
                     // TODO handle floating point CCRs
@@ -4412,8 +4412,8 @@ fn parseRegName(name: []const u8) ?Register {
 fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
     const tag = self.mir_instructions.items(.tag)[inst];
     switch (tag) {
-        .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
-        .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+        .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+        .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
         else => unreachable,
     }
 }
@@ -4490,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             };
 
             for (fn_info.param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                 if (param_size <= 8) {
                     if (next_register < argument_registers.len) {
                         result.args[i] = .{ .register = argument_registers[next_register] };
@@ -4522,7 +4522,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
     } else if (!ret_ty.hasRuntimeBits(mod)) {
         result.return_value = .{ .none = {} };
     } else {
-        const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+        const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
         // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
         if (ret_ty_size <= 8) {
             result.return_value = switch (role) {
@@ -4721,7 +4721,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
         const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4816,7 +4816,7 @@ fn truncRegister(
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = operand_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u6, 64 - int_bits) },
+                .rs2_or_imm = .{ .imm = @as(u6, @intCast(64 - int_bits)) },
             },
         },
     });
@@ -4830,7 +4830,7 @@ fn truncRegister(
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = dest_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u6, int_bits) },
+                .rs2_or_imm = .{ .imm = @as(u6, @intCast(int_bits)) },
            },
        },
    });
```
