Diffstat (limited to 'src/codegen/llvm.zig'):
 src/codegen/llvm.zig | 86
 1 file changed, 81 insertions(+), 5 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 18237efe65..233ec21ac1 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2724,6 +2724,9 @@ pub const DeclGen = struct {
if (comp.bin_file.options.llvm_cpu_features) |s| {
llvm_fn.addFunctionAttr("target-features", s);
}
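+ // BPF programs cannot link a runtime library, so "no-builtins" keeps LLVM
+ // from rewriting loop idioms into libcalls such as memcpy/memset.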
+ if (comp.getTarget().cpu.arch.isBpf()) {
+ llvm_fn.addFunctionAttr("no-builtins", "");
+ }
}
fn resolveGlobalDecl(dg: *DeclGen, decl_index: Module.Decl.Index) Error!*llvm.Value {
@@ -3402,7 +3405,7 @@ pub const DeclGen = struct {
};
return dg.context.constStruct(&fields, fields.len, .False);
},
- .int_u64, .one, .int_big_positive => {
+ .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
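+ // lazy_align and lazy_size values are resolved to concrete integers by toUnsignedInt below.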
const llvm_usize = try dg.lowerType(Type.usize);
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False);
return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
@@ -3820,6 +3823,8 @@ pub const DeclGen = struct {
const field_ty = union_obj.fields.values()[field_index].ty;
if (union_obj.layout == .Packed) {
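+ // A zero-bit field has no representation; lower the packed union to an all-zero constant.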
+ if (!field_ty.hasRuntimeBits())
+ return llvm_union_ty.constNull();
const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
const ty_bit_size = @intCast(u16, field_ty.bitSize(target));
const small_int_ty = dg.context.intType(ty_bit_size);
@@ -4745,6 +4750,10 @@ pub const FuncGen = struct {
.c_va_copy => try self.airCVaCopy(inst),
.c_va_end => try self.airCVaEnd(inst),
.c_va_start => try self.airCVaStart(inst),
+
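+ // GPU kernel builtins, currently only lowered for amdgcn (see the asserts below).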
+ .work_item_id => try self.airWorkItemId(inst),
+ .work_group_size => try self.airWorkGroupSize(inst),
+ .work_group_id => try self.airWorkGroupId(inst),
// zig fmt: on
};
if (opt_value) |val| {
@@ -5561,7 +5570,7 @@ pub const FuncGen = struct {
return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
}
- const load_inst = fg.builder.buildLoad(payload_ptr.getGEPResultElementType(), payload_ptr, "");
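+ // payload_ptr may have been constant-folded into a non-GEP value, so take
+ // the element type from the error union's struct type, not from the GEP.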
+ const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
load_inst.setAlignment(payload_ty.abiAlignment(target));
return load_inst;
}
@@ -6795,7 +6804,7 @@ pub const FuncGen = struct {
return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
}
- const load_inst = self.builder.buildLoad(payload_ptr.getGEPResultElementType(), payload_ptr, "");
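+ // Same as above: payload_ptr might not be a GEP, so use the struct field type.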
+ const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
load_inst.setAlignment(payload_ty.abiAlignment(target));
return load_inst;
}
@@ -8212,7 +8221,6 @@ pub const FuncGen = struct {
const dest_ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
- if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
// TODO Sema should emit a different instruction when the store should
// possibly do the safety 0xaa bytes for undefined.
@@ -8583,7 +8591,7 @@ pub const FuncGen = struct {
}
const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
const tag_field_ptr = self.builder.buildStructGEP(llvm_un_ty, union_handle, tag_index, "");
- return self.builder.buildLoad(tag_field_ptr.getGEPResultElementType(), tag_field_ptr, "");
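+ // Likewise, derive the loaded type from the union's struct type, since tag_field_ptr need not be a GEP.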
+ return self.builder.buildLoad(llvm_un_ty.structGetTypeAtIndex(tag_index), tag_field_ptr, "");
} else {
if (layout.payload_size == 0) {
return union_handle;
@@ -9567,6 +9575,74 @@ pub const FuncGen = struct {
return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, "");
}
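+ /// Builds a call to the .x/.y/.z variant of the AMDGCN work intrinsic named
+ /// by `basename`; any dimension above 2 folds to the `default` constant.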
+ fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !?*llvm.Value {
+ const llvm_u32 = self.context.intType(32);
+
+ const llvm_fn_name = switch (dimension) {
+ 0 => basename ++ ".x",
+ 1 => basename ++ ".y",
+ 2 => basename ++ ".z",
+ else => return llvm_u32.constInt(default, .False),
+ };
+
+ const args: [0]*llvm.Value = .{};
+ const llvm_fn = self.getIntrinsic(llvm_fn_name, &.{});
+ return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ }
+
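+ /// Lowers AIR's work_item_id to the llvm.amdgcn.workitem.id.{x,y,z} intrinsics.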
+ fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const target = self.dg.module.getTarget();
+ assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
+
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const dimension = pl_op.payload;
+ return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workitem.id");
+ }
+
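+ /// Reads the workgroup size in `dimension` from the HSA kernel dispatch
+ /// packet; dimensions beyond the third have size 1.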
+ fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const target = self.dg.module.getTarget();
+ assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
+
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const dimension = pl_op.payload;
+ const llvm_u32 = self.context.intType(32);
+ if (dimension >= 3) {
+ return llvm_u32.constInt(1, .False);
+ }
+
+ // Fetch the dispatch pointer, which points to this structure:
+ // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913
+ const llvm_fn = self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{});
+ const args: [0]*llvm.Value = .{};
+ const dispatch_ptr = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ dispatch_ptr.setAlignment(4);
+
+ // Load the work_group_* member from the struct as u16.
+ // Just treat the dispatch pointer as an array of u16 to keep things simple.
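+ // hsa_kernel_dispatch_packet_t begins with u16 header and setup fields, so
+ // workgroup_size_x/y/z live at u16 indices 2, 3, and 4.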
+ const offset = 2 + dimension;
+ const index = [_]*llvm.Value{llvm_u32.constInt(offset, .False)};
+ const llvm_u16 = self.context.intType(16);
+ const workgroup_size_ptr = self.builder.buildInBoundsGEP(llvm_u16, dispatch_ptr, &index, index.len, "");
+ const workgroup_size = self.builder.buildLoad(llvm_u16, workgroup_size_ptr, "");
+ workgroup_size.setAlignment(2);
+ return workgroup_size;
+ }
+
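+ /// Lowers AIR's work_group_id to the llvm.amdgcn.workgroup.id.{x,y,z} intrinsics.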
+ fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const target = self.dg.module.getTarget();
+ assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
+
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const dimension = pl_op.payload;
+ return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workgroup.id");
+ }
+
fn getErrorNameTable(self: *FuncGen) !*llvm.Value {
if (self.dg.object.error_name_table) |table| {
return table;