| author | Jacob Young <jacobly0@users.noreply.github.com> | 2022-12-26 23:08:10 -0500 |
|---|---|---|
| committer | Jacob Young <jacobly0@users.noreply.github.com> | 2022-12-27 00:12:56 -0500 |
| commit | 46b49a0a766a39ca37ba48f1a1c2ed28c260b08b (patch) | |
| tree | 9511c81a36419d8307767f23cc9495caa378fcbc /src/codegen/llvm.zig | |
| parent | 1b86a628acef7bc180ea6cbe6e4930710e5dff97 (diff) | |
| download | zig-46b49a0a766a39ca37ba48f1a1c2ed28c260b08b.tar.gz zig-46b49a0a766a39ca37ba48f1a1c2ed28c260b08b.zip | |
llvm: cleanup code no longer needed with opaque pointers

When using LLVM opaque pointers, typed pointer types and pointer bitcasts are
no longer needed. This also removes the need for packed struct layouts nested
inside pointer types, letting Sema skip struct layout computations that could
otherwise cause unnecessary dependency loops.
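For context on the mechanics: under LLVM's typed pointer model every pointer value carries an element type (`i8*`, `%S*`, ...), so handing a global to a site expecting a different pointee type required a constant bitcast; with opaque pointers there is a single `ptr` type per address space and the cast vanishes. A minimal before/after sketch in the style of this file's LLVM bindings — the import path and the two helper names are hypothetical, but `intType`, `pointerType`, and `constBitCast` are the calls used throughout the diff below:

```zig
const llvm = @import("llvm/bindings.zig"); // assumed import path for the LLVM bindings

// Typed pointers (before): the use site dictates an element type, so the
// global must be constant-bitcast to `i8*` before it can be used as one.
fn lowerStrPtrTyped(context: *llvm.Context, str_global: *llvm.Value) *llvm.Value {
    const llvm_ptr_ty = context.intType(8).pointerType(0); // i8* in addrspace(0)
    return str_global.constBitCast(llvm_ptr_ty);
}

// Opaque pointers (after): one `ptr` type per address space, so the global
// already has the right type and no cast is emitted.
fn lowerStrPtrOpaque(str_global: *llvm.Value) *llvm.Value {
    return str_global;
}
```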
Diffstat (limited to 'src/codegen/llvm.zig')
| -rw-r--r-- | src/codegen/llvm.zig | 492 |
|---|---|---|
1 file changed, 165 insertions, 327 deletions
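A second pattern worth previewing before the diff: where the old code bitcast an alloca's pointer so it could store a value as one type and load it back as another (e.g. passing a struct as an ABI-sized integer), the new code simply stores and loads through the same opaque pointer with two different value types. A sketch under the same assumptions as above; `passAsAbiSizedInt` is a hypothetical helper condensing the `.abi_sized_int` call-site lowering seen in the hunks below:

```zig
// Reinterpret an aggregate as an ABI-sized integer without a pointer bitcast:
// the alloca yields an untyped `ptr`, so the store may use the struct type
// while the load uses the integer type.
fn passAsAbiSizedInt(
    fg: *FuncGen,
    int_llvm_ty: *llvm.Type, // iN with N = 8 * ABI size of the parameter
    llvm_arg: *llvm.Value, // the struct-typed argument value
    alignment: c_uint,
) *llvm.Value {
    const int_ptr = fg.buildAlloca(int_llvm_ty, alignment); // one stack slot
    const store_inst = fg.builder.buildStore(llvm_arg, int_ptr); // store as struct
    store_inst.setAlignment(alignment);
    const load_inst = fg.builder.buildLoad(int_llvm_ty, int_ptr, ""); // load as iN
    load_inst.setAlignment(alignment);
    return load_inst;
}
```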
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 4c553f0305..49cf0ff9f0 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -582,7 +582,7 @@ pub const Object = struct { const mod = self.module; const target = mod.getTarget(); - const llvm_ptr_ty = self.context.intType(8).pointerType(0); // TODO: Address space + const llvm_ptr_ty = self.context.pointerType(0); // TODO: Address space const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth()); const type_fields = [_]*llvm.Type{ llvm_ptr_ty, @@ -608,7 +608,7 @@ pub const Object = struct { str_global.setAlignment(1); const slice_fields = [_]*llvm.Value{ - str_global.constBitCast(llvm_ptr_ty), + str_global, llvm_usize_ty.constInt(name.len, .False), }; llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len); @@ -623,7 +623,7 @@ pub const Object = struct { error_name_table_global.setUnnamedAddr(.True); error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode - const error_name_table_ptr = error_name_table_global.constBitCast(llvm_slice_ty.pointerType(0)); // TODO: Address space + const error_name_table_ptr = error_name_table_global; error_name_table_ptr_global.setInitializer(error_name_table_ptr); } @@ -681,10 +681,9 @@ pub const Object = struct { const other_global = object.getLlvmGlobal(decl.name) orelse continue; if (other_global == llvm_global) continue; - const new_global_ptr = other_global.constBitCast(llvm_global.typeOf()); - llvm_global.replaceAllUsesWith(new_global_ptr); + llvm_global.replaceAllUsesWith(other_global); deleteLlvmGlobal(llvm_global); - entry.value_ptr.* = new_global_ptr; + entry.value_ptr.* = other_global; } object.extern_collisions.clearRetainingCapacity(); @@ -703,11 +702,7 @@ pub const Object = struct { const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue; if (other_global == llvm_global) continue; - // replaceAllUsesWith requires the type to be unchanged. So we bitcast - // the new global to the old type and use that as the thing to replace - // old uses. 
- const new_global_ptr = llvm_global.constBitCast(other_global.typeOf()); - other_global.replaceAllUsesWith(new_global_ptr); + other_global.replaceAllUsesWith(llvm_global); llvm_global.takeName(other_global); deleteLlvmGlobal(other_global); // Problem: now we need to replace in the decl_map that @@ -962,7 +957,7 @@ pub const Object = struct { if (isByRef(param_ty)) { const alignment = param_ty.abiAlignment(target); const param_llvm_ty = param.typeOf(); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); store_inst.setAlignment(alignment); args.appendAssumeCapacity(arg_ptr); @@ -1020,14 +1015,12 @@ pub const Object = struct { const param_llvm_ty = try dg.lowerType(param_ty); const abi_size = @intCast(c_uint, param_ty.abiSize(target)); const int_llvm_ty = dg.context.intType(abi_size * 8); - const int_ptr_llvm_ty = int_llvm_ty.pointerType(0); const alignment = @max( param_ty.abiAlignment(target), dg.object.target_data.abiAlignmentOfType(int_llvm_ty), ); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); - const casted_ptr = builder.buildBitCast(arg_ptr, int_ptr_llvm_ty, ""); - const store_inst = builder.buildStore(param, casted_ptr); + const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + const store_inst = builder.buildStore(param, arg_ptr); store_inst.setAlignment(alignment); try args.ensureUnusedCapacity(1); @@ -1078,14 +1071,13 @@ pub const Object = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); const param_alignment = param_ty.abiAlignment(target); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target); + const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); - const casted_ptr = builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), ""); for (field_types) |_, field_i_usize| { const field_i = @intCast(c_uint, field_i_usize); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const field_ptr = builder.buildStructGEP(llvm_ty, casted_ptr, field_i, ""); + const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, ""); const store_inst = builder.buildStore(param, field_ptr); store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8); } @@ -1113,9 +1105,8 @@ pub const Object = struct { llvm_arg_i += 1; const alignment = param_ty.abiAlignment(target); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); - const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), ""); - _ = builder.buildStore(param, casted_ptr); + const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + _ = builder.buildStore(param, arg_ptr); if (isByRef(param_ty)) { try args.append(arg_ptr); @@ -1132,9 +1123,8 @@ pub const Object = struct { llvm_arg_i += 1; const alignment = param_ty.abiAlignment(target); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); - const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), ""); - _ = builder.buildStore(param, 
casted_ptr); + const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + _ = builder.buildStore(param, arg_ptr); if (isByRef(param_ty)) { try args.append(arg_ptr); @@ -2477,12 +2467,8 @@ pub const DeclGen = struct { new_global.setAlignment(global.getAlignment()); if (decl.@"linksection") |section| new_global.setSection(section); new_global.setInitializer(llvm_init); - // replaceAllUsesWith requires the type to be unchanged. So we convert - // the new global to the old type and use that as the thing to replace - // old uses. // TODO: How should this work then the address space of a global changed? - const new_global_ptr = new_global.constBitCast(global.typeOf()); - global.replaceAllUsesWith(new_global_ptr); + global.replaceAllUsesWith(new_global); dg.object.decl_map.putAssumeCapacity(decl_index, new_global); new_global.takeName(global); global.deleteGlobal(); @@ -2781,11 +2767,7 @@ pub const DeclGen = struct { } const ptr_info = t.ptrInfo().data; const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); - if (ptr_info.host_size != 0) { - return dg.context.intType(ptr_info.host_size * 8).pointerType(llvm_addrspace); - } - const llvm_elem_ty = try dg.lowerPtrElemTy(ptr_info.pointee_type); - return llvm_elem_ty.pointerType(llvm_addrspace); + return dg.context.pointerType(llvm_addrspace); }, .Opaque => switch (t.tag()) { .@"opaque" => { @@ -3108,8 +3090,7 @@ pub const DeclGen = struct { defer llvm_params.deinit(); if (firstParamSRet(fn_info, target)) { - const llvm_sret_ty = try dg.lowerType(fn_info.return_type); - try llvm_params.append(llvm_sret_ty.pointerType(0)); + try llvm_params.append(dg.context.pointerType(0)); } if (fn_info.return_type.isError() and @@ -3131,9 +3112,7 @@ pub const DeclGen = struct { try llvm_params.append(try dg.lowerType(param_ty)); }, .byref, .byref_mut => { - const param_ty = fn_info.param_types[it.zig_index - 1]; - const raw_llvm_ty = try dg.lowerType(param_ty); - try llvm_params.append(raw_llvm_ty.pointerType(0)); + try llvm_params.append(dg.context.pointerType(0)); }, .abi_sized_int => { const param_ty = fn_info.param_types[it.zig_index - 1]; @@ -3323,16 +3302,12 @@ pub const DeclGen = struct { const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - const llvm_var_type = try dg.lowerType(tv.ty); - const llvm_actual_ptr_type = llvm_var_type.pointerType(llvm_actual_addrspace); - const val = try dg.resolveGlobalDecl(decl_index); - const val_ptr = val.constBitCast(llvm_actual_ptr_type); - if (llvm_actual_addrspace != llvm_wanted_addrspace) { - const llvm_wanted_ptr_type = llvm_var_type.pointerType(llvm_wanted_addrspace); - return val_ptr.constAddrSpaceCast(llvm_wanted_ptr_type); - } - return val_ptr; + const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) + val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) + else + val; + return addrspace_casted_ptr; }, .slice => { const slice = tv.val.castTag(.slice).?.data; @@ -3355,7 +3330,7 @@ pub const DeclGen = struct { return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.childType()); + return dg.lowerParentPtr(tv.val); }, .null_value, .zero => { const llvm_type = try dg.lowerType(tv.ty); @@ -3363,7 +3338,7 @@ pub const DeclGen = struct { }, .opt_payload => { const payload = 
tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty); + return dg.lowerParentPtr(payload); }, else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty.fmtDebug(), tag, @@ -3940,7 +3915,6 @@ pub const DeclGen = struct { dg: *DeclGen, ptr_val: Value, decl_index: Module.Decl.Index, - ptr_child_ty: Type, ) Error!*llvm.Value { const decl = dg.module.declPtr(decl_index); dg.module.markDeclAlive(decl); @@ -3949,62 +3923,54 @@ pub const DeclGen = struct { .data = decl.ty, }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); - - if (ptr_child_ty.eql(decl.ty, dg.module)) { - return llvm_ptr; - } else { - return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0)); - } + return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } - fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, ptr_child_ty: Type) Error!*llvm.Value { + fn lowerParentPtr(dg: *DeclGen, ptr_val: Value) Error!*llvm.Value { const target = dg.module.getTarget(); - var bitcast_needed: bool = undefined; - const llvm_ptr = switch (ptr_val.tag()) { + switch (ptr_val.tag()) { .decl_ref_mut => { const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); + return dg.lowerParentPtrDecl(ptr_val, decl); }, .decl_ref => { const decl = ptr_val.castTag(.decl_ref).?.data; - return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); + return dg.lowerParentPtrDecl(ptr_val, decl); }, .variable => { const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); + return dg.lowerParentPtrDecl(ptr_val, decl); }, .int_i64 => { const int = ptr_val.castTag(.int_i64).?.data; const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False); - return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0)); + return llvm_int.constIntToPtr(dg.context.pointerType(0)); }, .int_u64 => { const int = ptr_val.castTag(.int_u64).?.data; const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(int, .False); - return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0)); + return llvm_int.constIntToPtr(dg.context.pointerType(0)); }, - .field_ptr => blk: { + .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, field_ptr.container_ty); + const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr); const parent_ty = field_ptr.container_ty; const field_index = @intCast(u32, field_ptr.field_index); const llvm_u32 = dg.context.intType(32); switch (parent_ty.zigTypeTag()) { .Union => { - bitcast_needed = true; if (parent_ty.containerLayout() == .Packed) { - break :blk parent_llvm_ptr; + return parent_llvm_ptr; } const layout = parent_ty.unionGetLayout(target); if (layout.payload_size == 0) { // In this case a pointer to the union and a pointer to any // (void) payload is the same. 
- break :blk parent_llvm_ptr; + return parent_llvm_ptr; } const llvm_pl_index = if (layout.tag_size == 0) 0 @@ -4015,10 +3981,9 @@ pub const DeclGen = struct { llvm_u32.constInt(llvm_pl_index, .False), }; const parent_llvm_ty = try dg.lowerType(parent_ty); - break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .Struct => { - const field_ty = parent_ty.structFieldType(field_index); if (parent_ty.containerLayout() == .Packed) { const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); @@ -4033,26 +3998,23 @@ pub const DeclGen = struct { }; const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); const field_addr = base_addr.constAdd(byte_offset); - bitcast_needed = false; - const final_llvm_ty = (try dg.lowerType(ptr_child_ty)).pointerType(0); - break :blk field_addr.constIntToPtr(final_llvm_ty); + const final_llvm_ty = dg.context.pointerType(0); + return field_addr.constIntToPtr(final_llvm_ty); } var ty_buf: Type.Payload.Pointer = undefined; const parent_llvm_ty = try dg.lowerType(parent_ty); if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| { - bitcast_needed = !field_ty.eql(ptr_child_ty, dg.module); const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(llvm_field_index, .False), }; - break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } else { - bitcast_needed = !parent_ty.eql(ptr_child_ty, dg.module); const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False); const indices: [1]*llvm.Value = .{llvm_index}; - break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } }, .Pointer => { @@ -4062,37 +4024,34 @@ pub const DeclGen = struct { llvm_u32.constInt(field_index, .False), }; const parent_llvm_ty = try dg.lowerType(parent_ty); - break :blk parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, else => unreachable, } }, - .elem_ptr => blk: { + .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty); - bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module); + const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr); const llvm_usize = try dg.lowerType(Type.usize); const indices: [1]*llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty); - break :blk elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, - .opt_payload_ptr => blk: { + .opt_payload_ptr => { const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, opt_payload_ptr.container_ty); + const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); - bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module); - if 
(!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.optionalReprIsPayload()) { // In this case, we represent pointer to optional the same as pointer // to the payload. - break :blk parent_llvm_ptr; + return parent_llvm_ptr; } const llvm_u32 = dg.context.intType(32); @@ -4101,19 +4060,17 @@ pub const DeclGen = struct { llvm_u32.constInt(0, .False), }; const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty); - break :blk opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, - .eu_payload_ptr => blk: { + .eu_payload_ptr => { const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, eu_payload_ptr.container_ty); + const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr); const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(); - bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { // In this case, we represent pointer to error union the same as pointer // to the payload. - break :blk parent_llvm_ptr; + return parent_llvm_ptr; } const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; @@ -4123,14 +4080,9 @@ pub const DeclGen = struct { llvm_u32.constInt(payload_offset, .False), }; const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty); - break :blk eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, else => unreachable, - }; - if (bitcast_needed) { - return llvm_ptr.constBitCast((try dg.lowerPtrElemTy(ptr_child_ty)).pointerType(0)); - } else { - return llvm_ptr; } } @@ -4189,8 +4141,7 @@ pub const DeclGen = struct { const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: { - const llvm_decl_ty = try self.lowerType(decl.ty); - const llvm_decl_wanted_ptr_ty = llvm_decl_ty.pointerType(llvm_wanted_addrspace); + const llvm_decl_wanted_ptr_ty = self.context.pointerType(llvm_wanted_addrspace); break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty); } else llvm_decl_val; @@ -4471,18 +4422,11 @@ pub const FuncGen = struct { global.setGlobalConstant(.True); global.setUnnamedAddr(.True); global.setAlignment(tv.ty.abiAlignment(target)); - // Because of LLVM limitations for lowering certain types such as unions, - // the type of global constants might not match the type it is supposed to - // be, and so we must bitcast the pointer at the usage sites. 
- const wanted_llvm_ty = try self.dg.lowerType(tv.ty); - const wanted_bitcasted_llvm_ptr_ty = wanted_llvm_ty.pointerType(llvm_actual_addrspace); - const bitcasted_ptr = global.constBitCast(wanted_bitcasted_llvm_ptr_ty); - const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(llvm_wanted_addrspace); - const casted_ptr = if (llvm_wanted_addrspace != llvm_actual_addrspace) - bitcasted_ptr.constAddrSpaceCast(wanted_llvm_ptr_ty) + const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) + global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace)) else - bitcasted_ptr; - return casted_ptr; + global; + return addrspace_casted_ptr; } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { @@ -4770,17 +4714,7 @@ pub const FuncGen = struct { load_inst.setAlignment(alignment); try llvm_args.append(load_inst); } else { - if (param_ty.zigTypeTag() == .Pointer) { - // We need a bitcast in case of two possibilities: - // 1. The parameter type is a pointer to zero-sized type, - // which is always lowered to an LLVM type of `*i8`. - // 2. The argument is a global which does act as a pointer, however - // a bitcast is needed in order for the LLVM types to match. - const casted_ptr = self.builder.buildBitCast(llvm_arg, llvm_param_ty, ""); - try llvm_args.append(casted_ptr); - } else { - try llvm_args.append(llvm_arg); - } + try llvm_args.append(llvm_arg); } }, .byref => { @@ -4824,26 +4758,22 @@ pub const FuncGen = struct { const param_ty = self.air.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const abi_size = @intCast(c_uint, param_ty.abiSize(target)); - const int_llvm_ty = self.dg.context.intType(abi_size * 8); - const int_ptr_llvm_ty = int_llvm_ty.pointerType(0); + const int_llvm_ty = self.context.intType(abi_size * 8); if (isByRef(param_ty)) { const alignment = param_ty.abiAlignment(target); - const casted_ptr = self.builder.buildBitCast(llvm_arg, int_ptr_llvm_ty, ""); - const load_inst = self.builder.buildLoad(int_llvm_ty, casted_ptr, ""); + const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); } else { // LLVM does not allow bitcasting structs so we must allocate - // a local, bitcast its pointer, store, and then load. + // a local, store as one type, and then load as another type. 
const alignment = @max( param_ty.abiAlignment(target), self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty), ); const int_ptr = self.buildAlloca(int_llvm_ty, alignment); - const param_llvm_ty = try self.dg.lowerType(param_ty); - const casted_ptr = self.builder.buildBitCast(int_ptr, param_llvm_ty.pointerType(0), ""); - const store_inst = self.builder.buildStore(llvm_arg, casted_ptr); + const store_inst = self.builder.buildStore(llvm_arg, int_ptr); store_inst.setAlignment(alignment); const load_inst = self.builder.buildLoad(int_llvm_ty, int_ptr, ""); load_inst.setAlignment(alignment); @@ -4872,12 +4802,11 @@ pub const FuncGen = struct { break :p p; }; - const llvm_ty = self.dg.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False); - const casted_ptr = self.builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), ""); + const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False); try llvm_args.ensureUnusedCapacity(it.llvm_types_len); for (llvm_types) |field_ty, i_usize| { const i = @intCast(c_uint, i_usize); - const field_ptr = self.builder.buildStructGEP(llvm_ty, casted_ptr, i, ""); + const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); const load_inst = self.builder.buildLoad(field_ty, field_ptr, ""); load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8); llvm_args.appendAssumeCapacity(load_inst); @@ -4886,7 +4815,7 @@ pub const FuncGen = struct { .as_u16 => { const arg = args[it.zig_index - 1]; const llvm_arg = try self.resolveInst(arg); - const casted = self.builder.buildBitCast(llvm_arg, self.dg.context.intType(16), ""); + const casted = self.builder.buildBitCast(llvm_arg, self.context.intType(16), ""); try llvm_args.append(casted); }, .float_array => |count| { @@ -4903,9 +4832,8 @@ pub const FuncGen = struct { const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?); const array_llvm_ty = float_ty.arrayType(count); - const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), ""); const alignment = arg_ty.abiAlignment(target); - const load_inst = self.builder.buildLoad(array_llvm_ty, casted, ""); + const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); }, @@ -4921,10 +4849,9 @@ pub const FuncGen = struct { llvm_arg = store_inst; } - const array_llvm_ty = self.dg.context.intType(elem_size).arrayType(arr_len); - const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), ""); + const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len); const alignment = arg_ty.abiAlignment(target); - const load_inst = self.builder.buildLoad(array_llvm_ty, casted, ""); + const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); }, @@ -5028,12 +4955,10 @@ pub const FuncGen = struct { if (abi_ret_ty != llvm_ret_ty) { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite - // by bitcasting a pointer to our canonical type, then loading it if necessary. + // by using our canonical type, then loading it if necessary. 
const alignment = self.dg.object.target_data.abiAlignmentOfType(abi_ret_ty); const rp = self.buildAlloca(llvm_ret_ty, alignment); - const ptr_abi_ty = abi_ret_ty.pointerType(0); - const casted_ptr = self.builder.buildBitCast(rp, ptr_abi_ty, ""); - const store_inst = self.builder.buildStore(call, casted_ptr); + const store_inst = self.builder.buildStore(call, rp); store_inst.setAlignment(alignment); if (isByRef(return_type)) { return rp; @@ -5086,7 +5011,6 @@ pub const FuncGen = struct { } const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); - const ptr_abi_ty = abi_ret_ty.pointerType(0); const operand = try self.resolveInst(un_op); const target = self.dg.module.getTarget(); const alignment = ret_ty.abiAlignment(target); @@ -5094,8 +5018,7 @@ pub const FuncGen = struct { if (isByRef(ret_ty)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. - const casted_ptr = self.builder.buildBitCast(operand, ptr_abi_ty, ""); - const load_inst = self.builder.buildLoad(abi_ret_ty, casted_ptr, ""); + const load_inst = self.builder.buildLoad(abi_ret_ty, operand, ""); load_inst.setAlignment(alignment); _ = self.builder.buildRet(load_inst); return null; @@ -5110,8 +5033,7 @@ pub const FuncGen = struct { const rp = self.buildAlloca(llvm_ret_ty, alignment); const store_inst = self.builder.buildStore(operand, rp); store_inst.setAlignment(alignment); - const casted_ptr = self.builder.buildBitCast(rp, ptr_abi_ty, ""); - const load_inst = self.builder.buildLoad(abi_ret_ty, casted_ptr, ""); + const load_inst = self.builder.buildLoad(abi_ret_ty, rp, ""); load_inst.setAlignment(alignment); _ = self.builder.buildRet(load_inst); return null; @@ -5141,12 +5063,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(un_op); const target = self.dg.module.getTarget(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); - const llvm_ret_ty = try self.dg.lowerType(ret_ty); - const casted_ptr = if (abi_ret_ty == llvm_ret_ty) ptr else p: { - const ptr_abi_ty = abi_ret_ty.pointerType(0); - break :p self.builder.buildBitCast(ptr, ptr_abi_ty, ""); - }; - const loaded = self.builder.buildLoad(abi_ret_ty, casted_ptr, ""); + const loaded = self.builder.buildLoad(abi_ret_ty, ptr, ""); loaded.setAlignment(ret_ty.abiAlignment(target)); _ = self.builder.buildRet(loaded); return null; @@ -5178,8 +5095,8 @@ pub const FuncGen = struct { const llvm_fn_name = "llvm.va_copy"; const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { const param_types = [_]*llvm.Type{ - self.dg.context.intType(8).pointerType(0), - self.dg.context.intType(8).pointerType(0), + self.context.pointerType(0), + self.context.pointerType(0), }; const fn_type = llvm.functionType(self.context.voidType(), ¶m_types, param_types.len, .False); break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); @@ -5203,7 +5120,7 @@ pub const FuncGen = struct { const llvm_fn_name = "llvm.va_end"; const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{self.dg.context.intType(8).pointerType(0)}; + const param_types = [_]*llvm.Type{self.context.pointerType(0)}; const fn_type = llvm.functionType(self.context.voidType(), ¶m_types, param_types.len, .False); break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); }; @@ -5224,7 +5141,7 @@ pub const FuncGen = struct { const llvm_fn_name = "llvm.va_start"; const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const 
param_types = [_]*llvm.Type{self.dg.context.intType(8).pointerType(0)}; + const param_types = [_]*llvm.Type{self.context.pointerType(0)}; const fn_type = llvm.functionType(self.context.voidType(), ¶m_types, param_types.len, .False); break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); }; @@ -5417,7 +5334,7 @@ pub const FuncGen = struct { // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. if (is_body or isByRef(inst_ty)) { - break :ty raw_llvm_ty.pointerType(0); + break :ty self.context.pointerType(0); } break :ty raw_llvm_ty; }; @@ -5482,7 +5399,7 @@ pub const FuncGen = struct { const payload_ty = self.air.typeOfIndex(inst); const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); - return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused, payload_ty); + return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { @@ -5491,9 +5408,8 @@ pub const FuncGen = struct { const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); - const payload_ty = self.air.typeOfIndex(inst); const is_unused = self.liveness.isUnused(inst); - return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused, payload_ty); + return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } fn lowerTry( @@ -5504,7 +5420,6 @@ pub const FuncGen = struct { operand_is_ptr: bool, can_elide_load: bool, is_unused: bool, - result_ty: Type, ) !?*llvm.Value { const payload_ty = err_union_ty.errorUnionPayload(); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(); @@ -5547,12 +5462,7 @@ pub const FuncGen = struct { return null; } if (!payload_has_bits) { - if (!operand_is_ptr) return null; - - // TODO once we update to an LLVM version with opaque pointers - // this bitcast won't be necessary. 
- const res_ptr_ty = try fg.dg.lowerType(result_ty); - return fg.builder.buildBitCast(err_union, res_ptr_ty, ""); + return if (operand_is_ptr) err_union else null; } const offset = errUnionPayloadOffset(payload_ty, target); if (operand_is_ptr) { @@ -6038,9 +5948,8 @@ pub const FuncGen = struct { const union_llvm_ty = try self.dg.lowerType(struct_ty); const layout = struct_ty.unionGetLayout(target); const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); - const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); + const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); const llvm_field_ty = try self.dg.lowerType(field_ty); - const field_ptr = self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), ""); if (isByRef(field_ty)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -6068,7 +5977,7 @@ pub const FuncGen = struct { const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); if (field_offset == 0) { - return self.builder.buildBitCast(field_ptr, res_ty, ""); + return field_ptr; } const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth()); @@ -6635,7 +6544,7 @@ pub const FuncGen = struct { self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; - const llvm_i8 = self.dg.context.intType(8); + const llvm_i8 = self.context.intType(8); return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), ""); } @@ -6701,16 +6610,12 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOf(ty_op.operand).childType(); - const result_ty = self.air.getRefType(ty_op.ty); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. - - // TODO once we update to LLVM 16 this bitcast won't be necessary. - const res_ptr_ty = try self.dg.lowerType(result_ty); - return self.builder.buildBitCast(operand, res_ptr_ty, ""); + return operand; } if (optional_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. @@ -6726,17 +6631,13 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOf(ty_op.operand).childType(); - const result_ty = self.air.getRefType(ty_op.ty); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); const non_null_bit = self.context.intType(8).constInt(1, .False); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. _ = self.builder.buildStore(non_null_bit, operand); - - // TODO once we update to LLVM 16 this bitcast won't be necessary. - const res_ptr_ty = try self.dg.lowerType(result_ty); - return self.builder.buildBitCast(operand, res_ptr_ty, ""); + return operand; } if (optional_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. @@ -6794,11 +6695,7 @@ pub const FuncGen = struct { const target = self.dg.module.getTarget(); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - if (!operand_is_ptr) return null; - - // TODO once we update to LLVM 14 this bitcast won't be necessary. 
- const res_ptr_ty = try self.dg.lowerType(result_ty); - return self.builder.buildBitCast(operand, res_ptr_ty, ""); + return if (operand_is_ptr) operand else null; } const offset = errUnionPayloadOffset(payload_ty, target); const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); @@ -6834,7 +6731,7 @@ pub const FuncGen = struct { if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { - return self.builder.buildBitCast(operand, err_llvm_ty.pointerType(0), ""); + return operand; } else { return err_llvm_ty.constInt(0, .False); } @@ -7104,14 +7001,8 @@ pub const FuncGen = struct { const llvm_slice_ty = try self.dg.lowerType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` - // but `ptr` is pointing to the global directly. If it's an array, we would want to - // do GEP(0,0), or we can just bitcast it to be correct, like we do here. - // This prevents an assertion failure. - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = inst_ty.slicePtrFieldType(&buf); - const ptr_llvm_ty = try self.dg.lowerType(ptr_ty); - const casted_ptr = self.builder.buildBitCast(ptr, ptr_llvm_ty, ""); - const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), casted_ptr, 0, ""); + // but `ptr` is pointing to the global directly. + const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, ""); return self.builder.buildInsertValue(partial, len, 1, ""); } @@ -7636,7 +7527,7 @@ pub const FuncGen = struct { .neg => { // In this case we can generate a softfloat negation by XORing the // bits with a constant. - const int_llvm_ty = self.dg.context.intType(float_bits); + const int_llvm_ty = self.context.intType(float_bits); const one = int_llvm_ty.constInt(1, .False); const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); @@ -8045,8 +7936,8 @@ pub const FuncGen = struct { const target = self.dg.module.getTarget(); if (operand_is_ref and result_is_ref) { - // They are both pointers; just do a bitcast on the pointers :) - return self.builder.buildBitCast(operand, llvm_dest_ty.pointerType(0), ""); + // They are both pointers, so just return the same opaque pointer :) + return operand; } if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) { @@ -8061,9 +7952,7 @@ pub const FuncGen = struct { const array_ptr = self.buildAlloca(llvm_dest_ty, null); const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8; if (bitcast_ok) { - const llvm_vector_ty = try self.dg.lowerType(operand_ty); - const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), ""); - const llvm_store = self.builder.buildStore(operand, casted_ptr); + const llvm_store = self.builder.buildStore(operand, array_ptr); llvm_store.setAlignment(inst_ty.abiAlignment(target)); } else { // If the ABI size of the element type is not evenly divisible by size in bits; @@ -8092,9 +7981,7 @@ pub const FuncGen = struct { const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8; if (bitcast_ok) { - const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0); - const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, ""); - const vector = self.builder.buildLoad(llvm_vector_ty, casted_ptr, ""); + const vector = self.builder.buildLoad(llvm_vector_ty, operand, ""); // The array is aligned to the element's alignment, while the vector might have a completely // different 
alignment. This means we need to enforce the alignment of this load. vector.setAlignment(elem_ty.abiAlignment(target)); @@ -8124,20 +8011,15 @@ pub const FuncGen = struct { } if (operand_is_ref) { - // Bitcast the operand pointer, then load. - const casted_ptr = self.builder.buildBitCast(operand, llvm_dest_ty.pointerType(0), ""); - const load_inst = self.builder.buildLoad(llvm_dest_ty, casted_ptr, ""); + const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, ""); load_inst.setAlignment(operand_ty.abiAlignment(target)); return load_inst; } if (result_is_ref) { - // Bitcast the result pointer, then store. const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); - const operand_llvm_ty = try self.dg.lowerType(operand_ty); - const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), ""); - const store_inst = self.builder.buildStore(operand, casted_ptr); + const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); return result_ptr; } @@ -8145,12 +8027,10 @@ pub const FuncGen = struct { if (llvm_dest_ty.getTypeKind() == .Struct) { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values. - // Therefore, we store operand to bitcasted alloca, then load for result. + // Therefore, we store operand to alloca, then load for result. const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); - const operand_llvm_ty = try self.dg.lowerType(operand_ty); - const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), ""); - const store_inst = self.builder.buildStore(operand, casted_ptr); + const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); const load_inst = self.builder.buildLoad(llvm_dest_ty, result_ptr, ""); load_inst.setAlignment(alignment); @@ -8248,7 +8128,7 @@ pub const FuncGen = struct { /// Use this instead of builder.buildAlloca, because this function makes sure to /// put the alloca instruction at the top of the function! 
fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) *llvm.Value { - return buildAllocaInner(self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, self.dg.module.getTarget()); + return buildAllocaInner(self.context, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, self.dg.module.getTarget()); } fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { @@ -8282,13 +8162,11 @@ pub const FuncGen = struct { const target = self.dg.module.getTarget(); const operand_size = operand_ty.abiSize(target); const u8_llvm_ty = self.context.intType(8); - const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0); - const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, ""); const fill_char = u8_llvm_ty.constInt(0xaa, .False); const dest_ptr_align = ptr_ty.ptrAlignment(target); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); - _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr()); + _ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr()); if (self.dg.module.comp.bin_file.options.valgrind) { self.valgrindMarkUndef(dest_ptr, len); } @@ -8365,7 +8243,7 @@ pub const FuncGen = struct { const llvm_i32 = self.context.intType(32); const llvm_fn_name = "llvm.frameaddress.p0"; const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const llvm_p0i8 = self.context.intType(8).pointerType(0); + const llvm_p0i8 = self.context.pointerType(0); const param_types = [_]*llvm.Type{llvm_i32}; const fn_type = llvm.functionType(llvm_p0i8, ¶m_types, param_types.len, .False); break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); @@ -8388,14 +8266,13 @@ pub const FuncGen = struct { fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - var ptr = try self.resolveInst(extra.ptr); + const ptr = try self.resolveInst(extra.ptr); var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); const operand_ty = self.air.typeOf(extra.ptr).elemType(); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating - ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); if (operand_ty.isSignedInt()) { expected_value = self.builder.buildSExt(expected_value, abi_ty, ""); new_value = self.builder.buildSExt(new_value, abi_ty, ""); @@ -8447,7 +8324,6 @@ pub const FuncGen = struct { const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, op == .Xchg); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating or bitcasting. - const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); const casted_operand = if (is_float) self.builder.buildBitCast(operand, abi_ty, "") else if (is_signed_int) @@ -8457,7 +8333,7 @@ pub const FuncGen = struct { const uncasted_result = self.builder.buildAtomicRmw( op, - casted_ptr, + ptr, casted_operand, ordering, single_threaded, @@ -8476,11 +8352,10 @@ pub const FuncGen = struct { // It's a pointer but we need to treat it as an int. 
const usize_llvm_ty = try self.dg.lowerType(Type.usize); - const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), ""); const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( op, - casted_ptr, + ptr, casted_operand, ordering, single_threaded, @@ -8508,8 +8383,7 @@ pub const FuncGen = struct { if (opt_abi_llvm_ty) |abi_llvm_ty| { // operand needs widening and truncating - const casted_ptr = self.builder.buildBitCast(ptr, abi_llvm_ty.pointerType(0), ""); - const load_inst = self.builder.buildLoad(abi_llvm_ty, casted_ptr, ""); + const load_inst = self.builder.buildLoad(abi_llvm_ty, ptr, ""); load_inst.setAlignment(ptr_alignment); load_inst.setVolatile(ptr_volatile); load_inst.setOrdering(ordering); @@ -8531,13 +8405,12 @@ pub const FuncGen = struct { const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null; - var ptr = try self.resolveInst(bin_op.lhs); + const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening - ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); if (operand_ty.isSignedInt()) { element = self.builder.buildSExt(element, abi_ty, ""); } else { @@ -8557,15 +8430,13 @@ pub const FuncGen = struct { const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndefDeep() else false; const len = try self.resolveInst(extra.rhs); const u8_llvm_ty = self.context.intType(8); - const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0); - const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, ""); const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value; const target = self.dg.module.getTarget(); const dest_ptr_align = ptr_ty.ptrAlignment(target); - _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr()); + _ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr()); if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) { - self.valgrindMarkUndef(dest_ptr_u8, len); + self.valgrindMarkUndef(dest_ptr, len); } return null; } @@ -8578,15 +8449,12 @@ pub const FuncGen = struct { const src_ptr = try self.resolveInst(extra.lhs); const src_ptr_ty = self.air.typeOf(extra.lhs); const len = try self.resolveInst(extra.rhs); - const llvm_ptr_u8 = self.context.intType(8).pointerType(0); - const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, llvm_ptr_u8, ""); - const src_ptr_u8 = self.builder.buildBitCast(src_ptr, llvm_ptr_u8, ""); const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); const target = self.dg.module.getTarget(); _ = self.builder.buildMemCpy( - dest_ptr_u8, + dest_ptr, dest_ptr_ty.ptrAlignment(target), - src_ptr_u8, + src_ptr, src_ptr_ty.ptrAlignment(target), len, is_volatile, @@ -8780,8 +8648,8 @@ pub const FuncGen = struct { const error_set_ty = self.air.getRefType(ty_op.ty); const names = error_set_ty.errorSetNames(); - const valid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Valid"); - const invalid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Invalid"); + const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); + const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); const end_block = 
self.context.appendBasicBlock(self.llvm_func, "End"); const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); @@ -8807,7 +8675,7 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(end_block); - const llvm_type = self.dg.context.intType(1); + const llvm_type = self.context.intType(1); const incoming_values: [2]*llvm.Value = .{ llvm_type.constInt(1, .False), llvm_type.constInt(0, .False), }; @@ -8869,13 +8737,13 @@ pub const FuncGen = struct { } } - const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry"); + const entry_block = self.context.appendBasicBlock(fn_val, "Entry"); self.builder.positionBuilderAtEnd(entry_block); self.builder.clearCurrentDebugLocation(); const fields = enum_ty.enumFields(); - const named_block = self.dg.context.appendBasicBlock(fn_val, "Named"); - const unnamed_block = self.dg.context.appendBasicBlock(fn_val, "Unnamed"); + const named_block = self.context.appendBasicBlock(fn_val, "Named"); + const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed"); const tag_int_value = fn_val.getParam(0); const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count())); @@ -8893,10 +8761,10 @@ pub const FuncGen = struct { switch_instr.addCase(this_tag_int_value, named_block); } self.builder.positionBuilderAtEnd(named_block); - _ = self.builder.buildRet(self.dg.context.intType(1).constInt(1, .False)); + _ = self.builder.buildRet(self.context.intType(1).constInt(1, .False)); self.builder.positionBuilderAtEnd(unnamed_block); - _ = self.builder.buildRet(self.dg.context.intType(1).constInt(0, .False)); + _ = self.builder.buildRet(self.context.intType(1).constInt(0, .False)); return fn_val; } @@ -8955,12 +8823,12 @@ pub const FuncGen = struct { } } - const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry"); + const entry_block = self.context.appendBasicBlock(fn_val, "Entry"); self.builder.positionBuilderAtEnd(entry_block); self.builder.clearCurrentDebugLocation(); const fields = enum_ty.enumFields(); - const bad_value_block = self.dg.context.appendBasicBlock(fn_val, "BadValue"); + const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue"); const tag_int_value = fn_val.getParam(0); const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count())); @@ -8969,7 +8837,7 @@ pub const FuncGen = struct { }; for (fields.keys()) |name, field_index| { - const str_init = self.dg.context.constString(name.ptr, @intCast(c_uint, name.len), .False); + const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); const str_init_llvm_ty = str_init.typeOf(); const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, ""); str_global.setInitializer(str_init); @@ -8990,7 +8858,7 @@ pub const FuncGen = struct { slice_global.setUnnamedAddr(.True); slice_global.setAlignment(slice_alignment); - const return_block = self.dg.context.appendBasicBlock(fn_val, "Name"); + const return_block = self.context.appendBasicBlock(fn_val, "Name"); const this_tag_int_value = int: { var tag_val_payload: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, @@ -9042,7 +8910,7 @@ pub const FuncGen = struct { const slice_llvm_ty = try self.dg.lowerType(slice_ty); const error_name_table_ptr = try self.getErrorNameTable(); - const ptr_slice_llvm_ty = slice_llvm_ty.pointerType(0); + const ptr_slice_llvm_ty = self.context.pointerType(0); const error_name_table = 
self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr, ""); const indices = [_]*llvm.Value{operand}; const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, ""); @@ -9285,7 +9153,7 @@ pub const FuncGen = struct { if (result_ty.containerLayout() == .Packed) { const struct_obj = result_ty.castTag(.@"struct").?.data; const big_bits = struct_obj.backing_int_ty.bitSize(target); - const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -9296,7 +9164,7 @@ pub const FuncGen = struct { const non_int_val = try self.resolveInst(elem); const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); - const small_int_ty = self.dg.context.intType(ty_bit_size); + const small_int_ty = self.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime()) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else @@ -9410,11 +9278,11 @@ pub const FuncGen = struct { if (union_obj.layout == .Packed) { const big_bits = union_ty.bitSize(target); - const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); - const small_int_ty = self.dg.context.intType(ty_bit_size); + const small_int_ty = self.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime()) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else @@ -9444,9 +9312,9 @@ pub const FuncGen = struct { } assert(isByRef(union_ty)); // The llvm type of the alloca will be the named LLVM union type, and will not - // necessarily match the format that we need, depending on which tag is active. We - // must construct the correct unnamed struct type here and bitcast, in order to - // then set the fields appropriately. + // necessarily match the format that we need, depending on which tag is active. + // We must construct the correct unnamed struct type here, in order to then set + // the fields appropriately. const result_ptr = self.buildAlloca(union_llvm_ty, layout.abi_align); const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); @@ -9489,8 +9357,6 @@ pub const FuncGen = struct { break :t self.context.structType(&fields, fields_len, .False); }; - const casted_ptr = self.builder.buildBitCast(result_ptr, llvm_union_ty.pointerType(0), ""); - // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. 
const index_type = self.context.intType(32); @@ -9510,7 +9376,7 @@ pub const FuncGen = struct { index_type.constNull(), }; const len: c_uint = if (field_size == layout.payload_size) 2 else 3; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, len, ""); + const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); return result_ptr; } @@ -9522,7 +9388,7 @@ pub const FuncGen = struct { index_type.constNull(), }; const len: c_uint = if (field_size == layout.payload_size) 2 else 3; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, len, ""); + const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); } { @@ -9530,7 +9396,7 @@ pub const FuncGen = struct { index_type.constNull(), index_type.constInt(@boolToInt(layout.tag_align < layout.payload_align), .False), }; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, indices.len, ""); + const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, ""); const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty); const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); const store_inst = self.builder.buildStore(llvm_tag, field_ptr); @@ -9579,8 +9445,7 @@ pub const FuncGen = struct { .data => {}, } - const llvm_u8 = self.context.intType(8); - const llvm_ptr_u8 = llvm_u8.pointerType(0); + const llvm_ptr_u8 = self.context.pointerType(0); const llvm_u32 = self.context.intType(32); const llvm_fn_name = "llvm.prefetch.p0"; @@ -9595,10 +9460,9 @@ pub const FuncGen = struct { }; const ptr = try self.resolveInst(prefetch.ptr); - const ptr_u8 = self.builder.buildBitCast(ptr, llvm_ptr_u8, ""); const params = [_]*llvm.Value{ - ptr_u8, + ptr, llvm_u32.constInt(@enumToInt(prefetch.rw), .False), llvm_u32.constInt(prefetch.locality, .False), llvm_u32.constInt(@enumToInt(prefetch.cache), .False), @@ -9625,8 +9489,7 @@ pub const FuncGen = struct { const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget()); - const llvm_slice_ty = try self.dg.lowerType(slice_ty); - const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space + const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef()); @@ -9699,7 +9562,7 @@ pub const FuncGen = struct { non_null_bit: *llvm.Value, ) !?*llvm.Value { const optional_llvm_ty = try self.dg.lowerType(optional_ty); - const non_null_field = self.builder.buildZExt(non_null_bit, self.dg.context.intType(8), ""); + const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); if (isByRef(optional_ty)) { const target = self.dg.module.getTarget(); @@ -9740,31 +9603,24 @@ pub const FuncGen = struct { .Packed => { const result_ty = self.air.typeOfIndex(inst); const result_ty_info = result_ty.ptrInfo().data; - const result_llvm_ty = try self.dg.lowerType(result_ty); if (result_ty_info.host_size != 0) { // From LLVM's perspective, a pointer to a packed struct and a pointer // to a field of a packed struct are the same. 
@@ -9740,31 +9603,24 @@ pub const FuncGen = struct {
.Packed => {
const result_ty = self.air.typeOfIndex(inst);
const result_ty_info = result_ty.ptrInfo().data;
- const result_llvm_ty = try self.dg.lowerType(result_ty);

if (result_ty_info.host_size != 0) {
// From LLVM's perspective, a pointer to a packed struct and a pointer
// to a field of a packed struct are the same. The difference is in the
// Zig pointer type which provides information for how to mask and shift
// out the relevant bits when accessing the pointee.
- // Here we perform a bitcast because we want to use the host_size
- // as the llvm pointer element type.
- return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
+ return struct_ptr;
}

// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
- if (byte_offset == 0) {
- return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
- }
+ if (byte_offset == 0) return struct_ptr;
const byte_llvm_ty = self.context.intType(8);
- const ptr_as_bytes = self.builder.buildBitCast(struct_ptr, byte_llvm_ty.pointerType(0), "");
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_index = llvm_usize.constInt(byte_offset, .False);
const indices: [1]*llvm.Value = .{llvm_index};
- const new_ptr = self.builder.buildInBoundsGEP(byte_llvm_ty, ptr_as_bytes, &indices, indices.len, "");
- return self.builder.buildBitCast(new_ptr, result_llvm_ty, "");
+ return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, "");
},
else => {
const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
@@ -9777,39 +9633,25 @@ pub const FuncGen = struct {
// end of the struct. Treat our struct pointer as an array of two and get
// the index to the element at index `1` to get a pointer to the end of
// the struct.
- const llvm_u32 = self.dg.context.intType(32);
+ const llvm_u32 = self.context.intType(32);
const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
}
},
},
- .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty),
+ .Union => {
+ const layout = struct_ty.unionGetLayout(target);
+ if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr;
+ const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
+ const union_llvm_ty = try self.dg.lowerType(struct_ty);
+ const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
+ return union_field_ptr;
+ },
else => unreachable,
}
}

- fn unionFieldPtr(
- self: *FuncGen,
- inst: Air.Inst.Index,
- union_ptr: *llvm.Value,
- union_ty: Type,
- ) !?*llvm.Value {
- const target = self.dg.module.getTarget();
- const layout = union_ty.unionGetLayout(target);
- const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
- if (layout.payload_size == 0) {
- return self.builder.buildBitCast(union_ptr, result_llvm_ty, "");
- }
- if (union_ty.containerLayout() == .Packed) {
- return self.builder.buildBitCast(union_ptr, result_llvm_ty, "");
- }
- const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
- const union_llvm_ty = try self.dg.lowerType(union_ty);
- const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, union_ptr, payload_index, "");
- return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
- }
-
fn getIntrinsic(self: *FuncGen, name: []const u8, types: []const *llvm.Type) *llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
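The `.Packed` pointer logic above is now cast-free: a pointer into a packed aggregate is returned unchanged when `host_size` is set, and a byte-aligned field needs only an i8 GEP. A sketch of the byte-offset case in isolation (hypothetical helper, same bindings):

    // Hypothetical sketch: step an opaque pointer forward by a byte offset.
    // Under typed pointers this took a bitcast to i8*, the GEP, and a bitcast
    // back; now the GEP element type alone carries that information.
    fn byteOffsetPtr(
        self: *FuncGen,
        struct_ptr: *llvm.Value,
        byte_offset: u64,
    ) !*llvm.Value {
        if (byte_offset == 0) return struct_ptr;
        const byte_llvm_ty = self.context.intType(8);
        const llvm_usize = try self.dg.lowerType(Type.usize);
        const llvm_index = llvm_usize.constInt(byte_offset, .False);
        const indices: [1]*llvm.Value = .{llvm_index};
        return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, "");
    }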
@@ -9828,13 +9670,12 @@ pub const FuncGen = struct {
const target = fg.dg.module.getTarget();
const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target));
const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align);
- const llvm_ptr_u8 = fg.context.intType(8).pointerType(0);
const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits);

const size_bytes = pointee_type.abiSize(target);
_ = fg.builder.buildMemCpy(
- fg.builder.buildBitCast(result_ptr, llvm_ptr_u8, ""),
+ result_ptr,
result_align,
- fg.builder.buildBitCast(ptr, llvm_ptr_u8, ""),
+ ptr,
ptr_alignment,
llvm_usize.constInt(size_bytes, .False),
is_volatile,
@@ -9855,7 +9696,7 @@ pub const FuncGen = struct {

assert(info.vector_index != .runtime);
if (info.vector_index != .none) {
- const index_u32 = self.dg.context.intType(32).constInt(@enumToInt(info.vector_index), .False);
+ const index_u32 = self.context.intType(32).constInt(@enumToInt(info.vector_index), .False);
const vec_elem_ty = try self.dg.lowerType(info.pointee_type);
const vec_ty = vec_elem_ty.vectorType(info.host_size);
@@ -9878,8 +9719,7 @@ pub const FuncGen = struct {
}

const int_elem_ty = self.context.intType(info.host_size * 8);
- const int_ptr = self.builder.buildBitCast(ptr, int_elem_ty.pointerType(0), "");
- const containing_int = self.builder.buildLoad(int_elem_ty, int_ptr, "");
+ const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
@@ -9894,8 +9734,7 @@ pub const FuncGen = struct {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");

- const bitcasted_ptr = self.builder.buildBitCast(result_ptr, same_size_int.pointerType(0), "");
- const store_inst = self.builder.buildStore(truncated_int, bitcasted_ptr);
+ const store_inst = self.builder.buildStore(truncated_int, result_ptr);
store_inst.setAlignment(result_align);
return result_ptr;
}
@@ -9933,7 +9772,7 @@ pub const FuncGen = struct {

assert(info.vector_index != .runtime);
if (info.vector_index != .none) {
- const index_u32 = self.dg.context.intType(32).constInt(@enumToInt(info.vector_index), .False);
+ const index_u32 = self.context.intType(32).constInt(@enumToInt(info.vector_index), .False);
const vec_elem_ty = try self.dg.lowerType(elem_ty);
const vec_ty = vec_elem_ty.vectorType(info.host_size);
@@ -9952,8 +9791,7 @@ pub const FuncGen = struct {
if (info.host_size != 0) {
const int_elem_ty = self.context.intType(info.host_size * 8);
- const int_ptr = self.builder.buildBitCast(ptr, int_elem_ty.pointerType(0), "");
- const containing_int = self.builder.buildLoad(int_elem_ty, int_ptr, "");
+ const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
@@ -9978,7 +9816,7 @@ pub const FuncGen = struct {
const shifted_value = self.builder.buildShl(extended_value, shift_amt, "");
const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, "");

- const store_inst = self.builder.buildStore(ored_value, int_ptr);
+ const store_inst = self.builder.buildStore(ored_value, ptr);
assert(ordering == .NotAtomic);
store_inst.setAlignment(ptr_alignment);
store_inst.setVolatile(ptr_volatile);
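Both the byRef copy path and the packed load/store paths above now read and write the host integer straight through the incoming pointer. A sketch of the load side (hypothetical helper; `buildLShr` is assumed to exist in this file's bindings alongside the `buildShl` and `buildTrunc` calls shown above):

    // Hypothetical sketch: extract a packed field by loading the whole host
    // integer through the opaque pointer, shifting the field down to bit 0,
    // and truncating to the field's width — no pointer bitcasts on either end.
    fn loadPackedField(
        self: *FuncGen,
        ptr: *llvm.Value, // points at the host integer
        shift_amt: *llvm.Value, // field's bit offset, as a host-int constant
        host_bits: c_uint,
        elem_bits: c_uint,
    ) *llvm.Value {
        const int_elem_ty = self.context.intType(host_bits);
        const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
        const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
        const same_size_int = self.context.intType(elem_bits);
        return self.builder.buildTrunc(shifted_value, same_size_int, "");
    }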
@@ -9992,12 +9830,11 @@ pub const FuncGen = struct {
return;
}
assert(ordering == .NotAtomic);
- const llvm_ptr_u8 = self.context.intType(8).pointerType(0);
const size_bytes = elem_ty.abiSize(target);
_ = self.builder.buildMemCpy(
- self.builder.buildBitCast(ptr, llvm_ptr_u8, ""),
+ ptr,
ptr_alignment,
- self.builder.buildBitCast(elem, llvm_ptr_u8, ""),
+ elem,
elem_ty.abiAlignment(target),
self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
info.@"volatile",
@@ -11187,6 +11024,7 @@ fn compilerRtIntBits(bits: u16) u16 {
}

fn buildAllocaInner(
+ context: *llvm.Context,
builder: *llvm.Builder,
llvm_func: *llvm.Value,
di_scope_non_null: bool,
@@ -11224,7 +11062,7 @@ fn buildAllocaInner(
// The pointer returned from this function should have the generic address space,
// if this isn't the case then cast it to the generic address space.
if (address_space != llvm.address_space.default) {
- return builder.buildAddrSpaceCast(alloca, llvm_ty.pointerType(llvm.address_space.default), "");
+ return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), "");
}

return alloca;
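This final hunk explains the new `context` parameter threaded through every `buildAllocaInner` call site earlier in the diff: with opaque pointers the generic-address-space pointer type is built from the context alone rather than from the alloca's element type. A sketch of the resulting shape (hypothetical standalone function, same bindings):

    // Hypothetical sketch: the cast target no longer depends on the alloca's
    // element type, so only the context is needed to form the generic
    // address-space pointer type.
    fn castToDefaultAddrSpace(
        context: *llvm.Context,
        builder: *llvm.Builder,
        alloca: *llvm.Value,
        address_space: c_uint,
    ) *llvm.Value {
        if (address_space != llvm.address_space.default) {
            return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), "");
        }
        return alloca;
    }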
