author    Robin Voetter <robin@voetter.nl>  2025-03-18 22:31:57 +0100
committer GitHub <noreply@github.com>       2025-03-18 22:31:57 +0100
commit    5105c3c7fa46969dc731b7447415436fd7572e87 (patch)
tree      f3bdcc2b4f9193476cfc738a25b6d8869908999a /src
parent    074dd4d083b8ddefc370425568b61c890efe905d (diff)
parent    ee06b2ce760d927b62726de1e2e3cb33b48d4932 (diff)
Merge pull request #23158 from alichraghi/ali_spirv
spirv: miscellaneous stuff #2
Diffstat (limited to 'src')
-rw-r--r--  src/Sema.zig                      77
-rw-r--r--  src/codegen/spirv.zig            267
-rw-r--r--  src/codegen/spirv/Assembler.zig   56
-rw-r--r--  src/codegen/spirv/Module.zig      29
4 files changed, 310 insertions, 119 deletions
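
The Sema changes below centralize rejection of operations that need an integer
view of a pointer into a new checkLogicalPtrOperation helper, called from
@intFromPtr, @ptrFromInt, pointer subtraction, pointer arithmetic, runtime
element access, and pointer coercion. A minimal sketch of code the new check
rejects (assuming a target and backend for which the .input address space is
"logical" per target_util.arePointersLogical; the address space choice here is
illustrative):

    fn offend(p: [*]addrspace(.input) u32) usize {
        // error: illegal operation on logical pointer of type '[*]addrspace(.input) u32'
        return @intFromPtr(p);
    }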
diff --git a/src/Sema.zig b/src/Sema.zig
index cd1711c8b7..99e23e6f5a 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3648,7 +3648,7 @@ fn indexablePtrLen(
const object_ty = sema.typeOf(object);
const is_pointer_to = object_ty.isSinglePointer(zcu);
const indexable_ty = if (is_pointer_to) object_ty.childType(zcu) else object_ty;
- try checkIndexable(sema, block, src, indexable_ty);
+ try sema.checkIndexable(block, src, indexable_ty);
const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
return sema.fieldVal(block, src, object, field_name, src);
}
@@ -10103,6 +10103,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src);
try sema.validateRuntimeValue(block, ptr_src, operand);
+ try sema.checkLogicalPtrOperation(block, ptr_src, ptr_ty);
if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
return block.addBitCast(dest_ty, operand);
}
@@ -16389,6 +16390,8 @@ fn analyzeArithmetic(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
+ try sema.checkLogicalPtrOperation(block, src, lhs_ty);
+ try sema.checkLogicalPtrOperation(block, src, rhs_ty);
const lhs_int = try block.addBitCast(.usize, lhs);
const rhs_int = try block.addBitCast(.usize, rhs);
const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int);
@@ -16620,24 +16623,7 @@ fn analyzePtrArithmetic(
};
try sema.requireRuntimeBlock(block, op_src, runtime_src);
-
- const target = zcu.getTarget();
- if (target_util.arePointersLogical(target, ptr_info.flags.address_space)) {
- return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(op_src, "illegal pointer arithmetic on pointer of type '{}'", .{ptr_ty.fmt(pt)});
- errdefer msg.destroy(sema.gpa);
-
- const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
- try sema.errNote(op_src, msg, "arithmetic cannot be performed on pointers with address space '{s}' on target {s}-{s} by compiler backend {s}", .{
- @tagName(ptr_info.flags.address_space),
- target.cpu.arch.genericName(),
- @tagName(target.os.tag),
- @tagName(backend),
- });
-
- break :msg msg;
- });
- }
+ try sema.checkLogicalPtrOperation(block, op_src, ptr_ty);
return block.addInst(.{
.tag = air_tag,
@@ -22501,6 +22487,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
});
}
try sema.requireRuntimeBlock(block, src, operand_src);
+ try sema.checkLogicalPtrOperation(block, src, ptr_ty);
if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
if (!ptr_ty.isAllowzeroPtr(zcu)) {
@@ -23165,8 +23152,9 @@ fn ptrCastFull(
try sema.validateRuntimeValue(block, operand_src, operand);
- const need_null_check = block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu);
- const need_align_check = block.wantSafety() and dest_align.compare(.gt, src_align);
+ const can_cast_to_int = !target_util.arePointersLogical(zcu.getTarget(), operand_ty.ptrAddressSpace(zcu));
+ const need_null_check = can_cast_to_int and block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu);
+ const need_align_check = can_cast_to_int and block.wantSafety() and dest_align.compare(.gt, src_align);
// `operand` might be a slice. If `need_operand_ptr`, we'll populate `operand_ptr` with the raw pointer.
const need_operand_ptr = src_info.flags.size != .slice or // we already have it
@@ -23832,6 +23820,32 @@ fn checkPtrType(
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
}
+fn checkLogicalPtrOperation(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ if (zcu.intern_pool.indexToKey(ty.toIntern()) == .ptr_type) {
+ const target = zcu.getTarget();
+ const as = ty.ptrAddressSpace(zcu);
+ if (target_util.arePointersLogical(target, as)) {
+ return sema.failWithOwnedErrorMsg(block, msg: {
+ const msg = try sema.errMsg(src, "illegal operation on logical pointer of type '{}'", .{ty.fmt(pt)});
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(
+ src,
+ msg,
+ "cannot perform arithmetic on pointers with address space '{s}' on target {s}-{s}",
+ .{
+ @tagName(as),
+ target.cpu.arch.genericName(),
+ @tagName(target.os.tag),
+ },
+ );
+ break :msg msg;
+ });
+ }
+ }
+}
+
fn checkVectorElemType(
sema: *Sema,
block: *Block,
@@ -28326,7 +28340,7 @@ fn elemPtr(
.pointer => indexable_ptr_ty.childType(zcu),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}),
};
- try checkIndexable(sema, block, src, indexable_ty);
+ try sema.checkIndexable(block, src, indexable_ty);
const elem_ptr = switch (indexable_ty.zigTypeTag(zcu)) {
.array, .vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
@@ -28362,7 +28376,7 @@ fn elemPtrOneLayerOnly(
const pt = sema.pt;
const zcu = pt.zcu;
- try checkIndexable(sema, block, src, indexable_ty);
+ try sema.checkIndexable(block, src, indexable_ty);
switch (indexable_ty.ptrSize(zcu)) {
.slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
@@ -28376,6 +28390,8 @@ fn elemPtrOneLayerOnly(
const elem_ptr = try ptr_val.ptrElem(index, pt);
return Air.internedToRef(elem_ptr.toIntern());
}
+
+ try sema.checkLogicalPtrOperation(block, src, indexable_ty);
const result_ty = try indexable_ty.elemPtrType(null, pt);
return block.addPtrElemPtr(indexable, elem_index, result_ty);
@@ -28412,7 +28428,7 @@ fn elemVal(
const pt = sema.pt;
const zcu = pt.zcu;
- try checkIndexable(sema, block, src, indexable_ty);
+ try sema.checkIndexable(block, src, indexable_ty);
// TODO in case of a vector of pointers, we need to detect whether the element
// index is a scalar or vector instead of unconditionally casting to usize.
@@ -28438,6 +28454,7 @@ fn elemVal(
return Air.internedToRef((try pt.getCoerced(elem_val, elem_ty)).toIntern());
}
+ try sema.checkLogicalPtrOperation(block, src, indexable_ty);
return block.addBinOp(.ptr_elem_val, indexable, elem_index);
},
.one => {
@@ -28477,6 +28494,9 @@ fn validateRuntimeElemAccess(
parent_ty: Type,
parent_src: LazySrcLoc,
) CompileError!void {
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+
if (try elem_ty.comptimeOnlySema(sema.pt)) {
const msg = msg: {
const msg = try sema.errMsg(
@@ -28492,6 +28512,14 @@ fn validateRuntimeElemAccess(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
+
+ if (zcu.intern_pool.indexToKey(parent_ty.toIntern()) == .ptr_type) {
+ const target = zcu.getTarget();
+ const as = parent_ty.ptrAddressSpace(zcu);
+ if (target_util.arePointersLogical(target, as)) {
+ return sema.fail(block, elem_index_src, "cannot access element of logical pointer '{}'", .{parent_ty.fmt(pt)});
+ }
+ }
}
fn tupleFieldPtr(
@@ -31158,6 +31186,7 @@ fn coerceCompatiblePtrs(
if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(zcu) and
(try dest_ty.elemType2(zcu).hasRuntimeBitsSema(pt) or dest_ty.elemType2(zcu).zigTypeTag(zcu) == .@"fn"))
{
+ try sema.checkLogicalPtrOperation(block, inst_src, inst_ty);
const actual_ptr = if (inst_ty.isSlice(zcu))
try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
else
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 22aa9e7df0..c361090b51 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -464,7 +464,7 @@ const NavGen = struct {
const zcu = self.pt.zcu;
const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
- const decl_ptr_ty_id = try self.ptrType(ty, .Generic, .indirect);
+ const decl_ptr_ty_id = try self.ptrType(ty, self.spvStorageClass(.generic), .indirect);
const spv_decl_index = blk: {
const entry = try self.object.uav_link.getOrPut(self.object.gpa, .{ val, .Function });
@@ -581,18 +581,18 @@ const NavGen = struct {
/// that size. In this case, multiple elements of the largest type should be used.
/// The backing type will be chosen as the smallest supported integer larger than or equal to it in number of bits.
/// The result is valid to be used with OpTypeInt.
- /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
- /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers).
/// TODO: Should the result of this function be cached?
fn backingIntBits(self: *NavGen, bits: u16) ?u16 {
// The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
assert(bits != 0);
- // 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
+ if (self.spv.hasFeature(.arbitrary_precision_integers) and bits <= 32) return bits;
+
+ // We require the Int8 and Int16 capabilities and benefit from Int64 when available.
// 32-bit integers are always supported (see spec, 2.16.1, Data rules).
const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
- .{ .bits = 8, .feature = .int8 },
- .{ .bits = 16, .feature = .int16 },
+ .{ .bits = 8, .feature = null },
+ .{ .bits = 16, .feature = null },
.{ .bits = 32, .feature = null },
.{ .bits = 64, .feature = .int64 },
};
@@ -714,6 +714,7 @@ const NavGen = struct {
const int_info = scalar_ty.intInfo(zcu);
// Use backing bits so that negatives are sign extended
const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
+ assert(backing_bits != 0); // u0 is comptime
const signedness: Signedness = switch (@typeInfo(@TypeOf(value))) {
.int => |int| int.signedness,
@@ -721,35 +722,35 @@ const NavGen = struct {
else => unreachable,
};
- const value64: u64 = switch (signedness) {
- .signed => @bitCast(@as(i64, @intCast(value))),
- .unsigned => @as(u64, @intCast(value)),
- };
+ const final_value: spec.LiteralContextDependentNumber = blk: {
+ if (self.spv.hasFeature(.kernel)) {
+ const value64: u64 = switch (signedness) {
+ .signed => @bitCast(@as(i64, @intCast(value))),
+ .unsigned => @as(u64, @intCast(value)),
+ };
- // Manually truncate the value to the right amount of bits.
- const truncated_value = if (backing_bits == 64)
- value64
- else
- value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1;
+ // Manually truncate the value to the right amount of bits.
+ const truncated_value = if (backing_bits == 64)
+ value64
+ else
+ value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1;
- const result_ty_id = try self.resolveType(scalar_ty, .indirect);
- const result_id = self.spv.allocId();
+ break :blk switch (backing_bits) {
+ 1...32 => .{ .uint32 = @truncate(truncated_value) },
+ 33...64 => .{ .uint64 = truncated_value },
+ else => unreachable, // TODO: Large integer constants
+ };
+ }
- const section = &self.spv.sections.types_globals_constants;
- switch (backing_bits) {
- 0 => unreachable, // u0 is comptime
- 1...32 => try section.emit(self.spv.gpa, .OpConstant, .{
- .id_result_type = result_ty_id,
- .id_result = result_id,
- .value = .{ .uint32 = @truncate(truncated_value) },
- }),
- 33...64 => try section.emit(self.spv.gpa, .OpConstant, .{
- .id_result_type = result_ty_id,
- .id_result = result_id,
- .value = .{ .uint64 = truncated_value },
- }),
- else => unreachable, // TODO: Large integer constants
- }
+ break :blk switch (backing_bits) {
+ 1...32 => if (signedness == .signed) .{ .int32 = @intCast(value) } else .{ .uint32 = @intCast(value) },
+ 33...64 => if (signedness == .signed) .{ .int64 = value } else .{ .uint64 = value },
+ else => unreachable, // TODO: Large integer constants
+ };
+ };
+
+ const result_ty_id = try self.resolveType(scalar_ty, .indirect);
+ const result_id = try self.spv.constant(result_ty_id, final_value);
if (!ty.isVector(zcu)) return result_id;
return self.constructCompositeSplat(ty, result_id);
@@ -804,8 +805,6 @@ const NavGen = struct {
return self.spv.constUndef(result_ty_id);
}
- const section = &self.spv.sections.types_globals_constants;
-
const cacheable_id = cache: {
switch (ip.indexToKey(val.toIntern())) {
.int_type,
@@ -860,13 +859,7 @@ const NavGen = struct {
80, 128 => unreachable, // TODO
else => unreachable,
};
- const result_id = self.spv.allocId();
- try section.emit(self.spv.gpa, .OpConstant, .{
- .id_result_type = result_ty_id,
- .id_result = result_id,
- .value = lit,
- });
- break :cache result_id;
+ break :cache try self.spv.constant(result_ty_id, lit);
},
.err => |err| {
const value = try pt.getErrorValue(err.name);
@@ -989,8 +982,17 @@ const NavGen = struct {
},
.struct_type => {
const struct_type = zcu.typeToStruct(ty).?;
+
if (struct_type.layout == .@"packed") {
- return self.todo("packed struct constants", .{});
+ // TODO: composite int
+ // TODO: endianness
+ const bits: u16 = @intCast(ty.bitSize(zcu));
+ const bytes = std.mem.alignForward(u16, self.backingIntBits(bits).?, 8) / 8;
+ var limbs: [8]u8 = undefined;
+ @memset(&limbs, 0);
+ val.writeToPackedMemory(ty, pt, limbs[0..bytes], 0) catch unreachable;
+ const backing_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
+ return try self.constInt(backing_ty, @as(u64, @bitCast(limbs)));
}
var types = std.ArrayList(Type).init(self.gpa);
@@ -1022,6 +1024,11 @@ const NavGen = struct {
else => unreachable,
},
.un => |un| {
+ if (un.tag == .none) {
+ assert(ty.containerLayout(zcu) == .@"packed"); // TODO
+ const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+ return try self.constant(int_ty, Value.fromInterned(un.val), .direct);
+ }
const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
const union_obj = zcu.typeToUnion(ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
@@ -1354,7 +1361,7 @@ const NavGen = struct {
const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
- return self.todo("packed union types", .{});
+ return try self.intType(.unsigned, @intCast(ty.bitSize(zcu)));
}
const layout = self.unionLayout(ty);
@@ -1366,7 +1373,7 @@ const NavGen = struct {
var member_types: [4]IdRef = undefined;
var member_names: [4][]const u8 = undefined;
- const u8_ty_id = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled?
+ const u8_ty_id = try self.resolveType(Type.u8, .direct);
if (layout.tag_size != 0) {
const tag_ty_id = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
@@ -2821,6 +2828,7 @@ const NavGen = struct {
/// TODO is to also write out the error as a function call parameter, and to somehow fetch
/// the name of an error in the test executor.
fn generateTestEntryPoint(self: *NavGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
+ const zcu = self.pt.zcu;
const target = self.spv.target;
const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
@@ -2950,7 +2958,7 @@ const NavGen = struct {
.pointer = p_error_id,
.object = error_id,
.memory_access = .{
- .Aligned = .{ .literal_integer = @sizeOf(u16) },
+ .Aligned = .{ .literal_integer = @intCast(Type.abiAlignment(.anyerror, zcu).toByteUnits().?) },
},
});
try section.emit(self.spv.gpa, .OpReturn, {});
@@ -3223,10 +3231,13 @@ const NavGen = struct {
};
fn load(self: *NavGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
+ const zcu = self.pt.zcu;
+ const alignment: u32 = @intCast(value_ty.abiAlignment(zcu).toByteUnits().?);
const indirect_value_ty_id = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
.Volatile = options.is_volatile,
+ .Aligned = .{ .literal_integer = alignment },
};
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
.id_result_type = indirect_value_ty_id,
@@ -4229,7 +4240,7 @@ const NavGen = struct {
defer self.gpa.free(ids);
const result_id = self.spv.allocId();
- if (self.spv.hasFeature(.kernel)) {
+ if (self.spv.hasFeature(.addresses)) {
try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
.id_result_type = result_ty_id,
.id_result = result_id,
@@ -4308,6 +4319,7 @@ const NavGen = struct {
) !Temporary {
const pt = self.pt;
const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const scalar_ty = lhs.ty.scalarType(zcu);
const is_vector = lhs.ty.isVector(zcu);
@@ -4318,6 +4330,11 @@ const NavGen = struct {
const ty = lhs.ty.intTagType(zcu);
return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
},
+ .@"struct" => {
+ const struct_ty = zcu.typeToPackedStruct(scalar_ty).?;
+ const ty = Type.fromInterned(struct_ty.backingIntTypeUnordered(ip));
+ return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
+ },
.error_set => {
assert(!is_vector);
const err_int_ty = try pt.errorIntType();
@@ -4745,8 +4762,42 @@ const NavGen = struct {
switch (result_ty.zigTypeTag(zcu)) {
.@"struct" => {
if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
- _ = struct_type;
- unreachable; // TODO
+ comptime assert(Type.packed_struct_layout_version == 2);
+ const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
+ var running_int_id = try self.constInt(backing_int_ty, 0);
+ var running_bits: u16 = 0;
+ for (struct_type.field_types.get(ip), elements) |field_ty_ip, element| {
+ const field_ty = Type.fromInterned(field_ty_ip);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ const field_id = try self.resolve(element);
+ const ty_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+ const field_int_ty = try self.pt.intType(.unsigned, ty_bit_size);
+ const field_int_id = blk: {
+ if (field_ty.isPtrAtRuntime(zcu)) {
+ assert(self.spv.hasFeature(.addresses) or
+ (self.spv.hasFeature(.physical_storage_buffer) and field_ty.ptrAddressSpace(zcu) == .storage_buffer));
+ break :blk try self.intFromPtr(field_id);
+ }
+ break :blk try self.bitCast(field_int_ty, field_ty, field_id);
+ };
+ const shift_rhs = try self.constInt(backing_int_ty, running_bits);
+ const extended_int_conv = try self.buildIntConvert(backing_int_ty, .{
+ .ty = field_int_ty,
+ .value = .{ .singleton = field_int_id },
+ });
+ const shifted = try self.buildBinary(.sll, extended_int_conv, .{
+ .ty = backing_int_ty,
+ .value = .{ .singleton = shift_rhs },
+ });
+ const running_int_tmp = try self.buildBinary(
+ .bit_or,
+ .{ .ty = backing_int_ty, .value = .{ .singleton = running_int_id } },
+ shifted,
+ );
+ running_int_id = try running_int_tmp.materialize(self);
+ running_bits += ty_bit_size;
+ }
+ return running_int_id;
}
const types = try self.gpa.alloc(Type, elements.len);
@@ -5087,11 +5138,33 @@ const NavGen = struct {
const union_ty = zcu.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
+ const layout = self.unionLayout(ty);
+ const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
+
if (union_ty.flagsUnordered(ip).layout == .@"packed") {
- unreachable; // TODO
- }
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+ return self.constInt(int_ty, 0);
+ }
- const layout = self.unionLayout(ty);
+ assert(payload != null);
+ if (payload_ty.isInt(zcu)) {
+ if (ty.bitSize(zcu) == payload_ty.bitSize(zcu)) {
+ return self.bitCast(ty, payload_ty, payload.?);
+ }
+
+ const trunc = try self.buildIntConvert(ty, .{ .ty = payload_ty, .value = .{ .singleton = payload.? } });
+ return try trunc.materialize(self);
+ }
+
+ const payload_int_ty = try pt.intType(.unsigned, @intCast(payload_ty.bitSize(zcu)));
+ const payload_int = if (payload_ty.ip_index == .bool_type)
+ try self.convertToIndirect(payload_ty, payload.?)
+ else
+ try self.bitCast(payload_int_ty, payload_ty, payload.?);
+ const trunc = try self.buildIntConvert(ty, .{ .ty = payload_int_ty, .value = .{ .singleton = payload_int } });
+ return try trunc.materialize(self);
+ }
const tag_int = if (layout.tag_size != 0) blk: {
const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field);
@@ -5112,7 +5185,6 @@ const NavGen = struct {
try self.store(tag_ty, ptr_id, tag_id, .{});
}
- const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function, .indirect);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
@@ -5167,11 +5239,51 @@ const NavGen = struct {
switch (object_ty.zigTypeTag(zcu)) {
.@"struct" => switch (object_ty.containerLayout(zcu)) {
- .@"packed" => unreachable, // TODO
+ .@"packed" => {
+ const struct_ty = zcu.typeToPackedStruct(object_ty).?;
+ const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index);
+ const bit_offset_id = try self.constInt(.u16, bit_offset);
+ const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
+ const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+ const field_int_ty = try pt.intType(signedness, field_bit_size);
+ const shift_lhs: Temporary = .{ .ty = object_ty, .value = .{ .singleton = object_id } };
+ const shift = try self.buildBinary(.srl, shift_lhs, .{ .ty = .u16, .value = .{ .singleton = bit_offset_id } });
+ const mask_id = try self.constInt(object_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1);
+ const masked = try self.buildBinary(.bit_and, shift, .{ .ty = object_ty, .value = .{ .singleton = mask_id } });
+ const result_id = blk: {
+ if (self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(object_ty.bitSize(zcu))).?)
+ break :blk try self.bitCast(field_int_ty, object_ty, try masked.materialize(self));
+ const trunc = try self.buildIntConvert(field_int_ty, masked);
+ break :blk try trunc.materialize(self);
+ };
+ if (field_ty.ip_index == .bool_type) return try self.convertToDirect(.bool, result_id);
+ if (field_ty.isInt(zcu)) return result_id;
+ return try self.bitCast(field_ty, field_int_ty, result_id);
+ },
else => return try self.extractField(field_ty, object_id, field_index),
},
.@"union" => switch (object_ty.containerLayout(zcu)) {
- .@"packed" => unreachable, // TODO
+ .@"packed" => {
+ const backing_int_ty = try pt.intType(.unsigned, @intCast(object_ty.bitSize(zcu)));
+ const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
+ const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+ const int_ty = try pt.intType(signedness, field_bit_size);
+ const mask_id = try self.constInt(backing_int_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1);
+ const masked = try self.buildBinary(
+ .bit_and,
+ .{ .ty = backing_int_ty, .value = .{ .singleton = object_id } },
+ .{ .ty = backing_int_ty, .value = .{ .singleton = mask_id } },
+ );
+ const result_id = blk: {
+ if (self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(backing_int_ty.bitSize(zcu))).?)
+ break :blk try self.bitCast(int_ty, backing_int_ty, try masked.materialize(self));
+ const trunc = try self.buildIntConvert(int_ty, masked);
+ break :blk try trunc.materialize(self);
+ };
+ if (field_ty.ip_index == .bool_type) return try self.convertToDirect(.bool, result_id);
+ if (field_ty.isInt(zcu)) return result_id;
+ return try self.bitCast(field_ty, int_ty, result_id);
+ },
else => {
// Store, ptr-elem-ptr, pointer-cast, load
const layout = self.unionLayout(object_ty);
@@ -5252,28 +5364,28 @@ const NavGen = struct {
return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
},
- .@"union" => switch (object_ty.containerLayout(zcu)) {
- .@"packed" => return self.todo("implement field access for packed unions", .{}),
- else => {
- const layout = self.unionLayout(object_ty);
- if (!layout.has_payload) {
- // Asked to get a pointer to a zero-sized field. Just lower this
- // to undefined, there is no reason to make it be a valid pointer.
- return try self.spv.constUndef(result_ty_id);
- }
+ .@"union" => {
+ const layout = self.unionLayout(object_ty);
+ if (!layout.has_payload) {
+ // Asked to get a pointer to a zero-sized field. Just lower this
+ // to undefined, there is no reason to make it be a valid pointer.
+ return try self.spv.constUndef(result_ty_id);
+ }
- const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
- const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class, .indirect);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
+ const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class, .indirect);
+ const pl_ptr_id = blk: {
+ if (object_ty.containerLayout(zcu) == .@"packed") break :blk object_ptr;
+ break :blk try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
+ };
- const active_pl_ptr_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = result_ty_id,
- .id_result = active_pl_ptr_id,
- .operand = pl_ptr_id,
- });
- return active_pl_ptr_id;
- },
+ const active_pl_ptr_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+ .id_result_type = result_ty_id,
+ .id_result = active_pl_ptr_id,
+ .operand = pl_ptr_id,
+ });
+ return active_pl_ptr_id;
},
else => unreachable,
}
@@ -5292,7 +5404,7 @@ const NavGen = struct {
/// The final storage class of the pointer. This may be either `.Generic` or `.Function`.
/// In either case, the local is allocated in the `.Function` storage class, and optionally
/// cast back to `.Generic`.
- storage_class: StorageClass = .Generic,
+ storage_class: StorageClass,
};
// Allocate a function-local variable, with possible initializer.
@@ -5332,9 +5444,10 @@ const NavGen = struct {
fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const zcu = self.pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
- assert(ptr_ty.ptrAddressSpace(zcu) == .generic);
const child_ty = ptr_ty.childType(zcu);
- return try self.alloc(child_ty, .{});
+ return try self.alloc(child_ty, .{
+ .storage_class = self.spvStorageClass(ptr_ty.ptrAddressSpace(zcu)),
+ });
}
fn airArg(self: *NavGen) IdRef {
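
With the airAlloc change above, the storage class of a local now follows the
address space of the allocation's pointer type rather than being asserted to be
.generic. A sketch of the user-level effect (address space availability depends
on the target; per the doc comment above, the local is still allocated in the
Function storage class and only optionally cast back to Generic):

    export fn entry() void {
        var x: u32 = 123; // an OpVariable in the Function storage class
        _ = &x;
    }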
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index 25a5481fb5..264613b240 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -368,6 +368,40 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
});
break :blk result_id;
},
+ .OpTypeStruct => blk: {
+ const ids = try self.gpa.alloc(IdRef, operands[1..].len);
+ defer self.gpa.free(ids);
+ for (operands[1..], ids) |op, *id| id.* = try self.resolveRefId(op.ref_id);
+ const result_id = self.spv.allocId();
+ try self.spv.structType(result_id, ids, null);
+ break :blk result_id;
+ },
+ .OpTypeImage => blk: {
+ const sampled_type = try self.resolveRefId(operands[1].ref_id);
+ const result_id = self.spv.allocId();
+ try section.emit(self.gpa, .OpTypeImage, .{
+ .id_result = result_id,
+ .sampled_type = sampled_type,
+ .dim = @enumFromInt(operands[2].value),
+ .depth = operands[3].literal32,
+ .arrayed = operands[4].literal32,
+ .ms = operands[5].literal32,
+ .sampled = operands[6].literal32,
+ .image_format = @enumFromInt(operands[7].value),
+ });
+ break :blk result_id;
+ },
+ .OpTypeSampler => blk: {
+ const result_id = self.spv.allocId();
+ try section.emit(self.gpa, .OpTypeSampler, .{ .id_result = result_id });
+ break :blk result_id;
+ },
+ .OpTypeSampledImage => blk: {
+ const image_type = try self.resolveRefId(operands[1].ref_id);
+ const result_id = self.spv.allocId();
+ try section.emit(self.gpa, .OpTypeSampledImage, .{ .id_result = result_id, .image_type = image_type });
+ break :blk result_id;
+ },
.OpTypeFunction => blk: {
const param_operands = operands[2..];
const return_type = try self.resolveRefId(operands[1].ref_id);
@@ -406,18 +440,18 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
else => switch (self.inst.opcode) {
.OpEntryPoint => unreachable,
.OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes,
- .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) {
- .Function => &self.func.prologue,
- .Input, .Output => section: {
- maybe_spv_decl_index = try self.spv.allocDecl(.global);
- try self.func.decl_deps.put(self.spv.gpa, maybe_spv_decl_index.?, {});
- // TODO: In theory this can be non-empty if there is an initializer which depends on another global...
- try self.spv.declareDeclDeps(maybe_spv_decl_index.?, &.{});
+ .OpVariable => section: {
+ const storage_class: spec.StorageClass = @enumFromInt(operands[2].value);
+ if (storage_class == .Function) break :section &self.func.prologue;
+ maybe_spv_decl_index = try self.spv.allocDecl(.global);
+ if (self.spv.version.minor < 4 and storage_class != .Input and storage_class != .Output) {
+ // Before version 1.4, an entry point's interface may only list Input and Output variables.
break :section &self.spv.sections.types_globals_constants;
- },
- // These don't need to be marked in the dependency system.
- // Probably we should add them anyway, then filter out PushConstant globals.
- else => &self.spv.sections.types_globals_constants,
+ }
+ try self.func.decl_deps.put(self.spv.gpa, maybe_spv_decl_index.?, {});
+ // TODO: In theory this can be non-empty if there is an initializer which depends on another global...
+ try self.spv.declareDeclDeps(maybe_spv_decl_index.?, &.{});
+ break :section &self.spv.sections.types_globals_constants;
},
// Default case - to be worked out further.
else => &self.func.body,
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 5ed1e2df1a..1aa082f6bc 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -333,8 +333,6 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
// Versions
.v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
// Features with no dependencies
- .int8 => try self.addCapability(.Int8),
- .int16 => try self.addCapability(.Int16),
.int64 => try self.addCapability(.Int64),
.float16 => try self.addCapability(.Float16),
.float64 => try self.addCapability(.Float64),
@@ -343,21 +341,27 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
try self.addExtension("SPV_KHR_16bit_storage");
try self.addCapability(.StoragePushConstant16);
},
- .addresses => if (self.hasFeature(.shader)) {
- try self.addExtension("SPV_KHR_physical_storage_buffer");
- try self.addCapability(.PhysicalStorageBufferAddresses);
- } else {
- try self.addCapability(.Addresses);
+ .arbitrary_precision_integers => {
+ try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
+ try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
},
+ .addresses => try self.addCapability(.Addresses),
// Kernel
.kernel => try self.addCapability(.Kernel),
.generic_pointer => try self.addCapability(.GenericPointer),
.vector16 => try self.addCapability(.Vector16),
// Shader
.shader => try self.addCapability(.Shader),
+ .physical_storage_buffer => {
+ try self.addExtension("SPV_KHR_physical_storage_buffer");
+ try self.addCapability(.PhysicalStorageBufferAddresses);
+ },
}
}
}
+ // These are well supported
+ try self.addCapability(.Int8);
+ try self.addCapability(.Int16);
// Emit memory model
const addressing_model: spec.AddressingModel = blk: {
@@ -610,6 +614,17 @@ pub fn functionType(self: *Module, return_ty_id: IdRef, param_type_ids: []const
return result_id;
}
+pub fn constant(self: *Module, result_ty_id: IdRef, value: spec.LiteralContextDependentNumber) !IdRef {
+ const result_id = self.allocId();
+ const section = &self.sections.types_globals_constants;
+ try section.emit(self.gpa, .OpConstant, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .value = value,
+ });
+ return result_id;
+}
+
pub fn constBool(self: *Module, value: bool) !IdRef {
if (self.cache.bool_const[@intFromBool(value)]) |b| return b;
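
The new Module.constant helper above factors out the OpConstant emission that
spirv.zig previously open-coded at each call site. A hedged usage sketch,
reusing the resolveType call and LiteralContextDependentNumber initializer
shapes that appear elsewhere in this diff:

    const ty_id = try self.resolveType(Type.u32, .direct);
    const answer_id = try self.spv.constant(ty_id, .{ .uint32 = 42 });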