From 2fc409a32f18b0c62e3918f0b832ed9e4c8d142d Mon Sep 17 00:00:00 2001
From: Ali Cheraghi
Date: Sat, 8 Mar 2025 19:20:59 +0330
Subject: spirv: don't hardcode test error type alignment

---
 src/codegen/spirv.zig | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'src/codegen')

diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 22aa9e7df0..aeede443bf 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -2821,6 +2821,7 @@ const NavGen = struct {
     /// TODO is to also write out the error as a function call parameter, and to somehow fetch
     /// the name of an error in the test executor.
     fn generateTestEntryPoint(self: *NavGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
+        const zcu = self.pt.zcu;
         const target = self.spv.target;
         const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
@@ -2950,7 +2951,7 @@ const NavGen = struct {
             .pointer = p_error_id,
             .object = error_id,
             .memory_access = .{
-                .Aligned = .{ .literal_integer = @sizeOf(u16) },
+                .Aligned = .{ .literal_integer = @intCast(Type.abiAlignment(.anyerror, zcu).toByteUnits().?) },
             },
         });
         try section.emit(self.spv.gpa, .OpReturn, {});
-- cgit v1.2.3


From c1977bf0fbe523afb4721bc8346ee6536e3c0aa2 Mon Sep 17 00:00:00 2001
From: Ali Cheraghi
Date: Sun, 9 Mar 2025 06:41:56 +0330
Subject: Sema: error on illegal code when targeting spirv

---
 src/Sema.zig                             | 77 +++++++++++++++-------
 src/codegen/spirv.zig                    | 11 ++--
 test/behavior/globals.zig                |  4 ++
 test/behavior/ptrfromint.zig             |  2 +
 test/behavior/sizeof_and_typeof.zig      |  2 +
 .../illegal_operation_on_logical_ptr.zig | 52 +++++++++++++++
 6 files changed, 119 insertions(+), 29 deletions(-)
 create mode 100644 test/cases/compile_errors/illegal_operation_on_logical_ptr.zig

(limited to 'src/codegen')

diff --git a/src/Sema.zig b/src/Sema.zig
index cd1711c8b7..99e23e6f5a 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3648,7 +3648,7 @@ fn indexablePtrLen(
     const object_ty = sema.typeOf(object);
     const is_pointer_to = object_ty.isSinglePointer(zcu);
     const indexable_ty = if (is_pointer_to) object_ty.childType(zcu) else object_ty;
-    try checkIndexable(sema, block, src, indexable_ty);
+    try sema.checkIndexable(block, src, indexable_ty);
     const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
     return sema.fieldVal(block, src, object, field_name, src);
 }
@@ -10103,6 +10103,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
} try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src); try sema.validateRuntimeValue(block, ptr_src, operand); + try sema.checkLogicalPtrOperation(block, ptr_src, ptr_ty); if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { return block.addBitCast(dest_ty, operand); } @@ -16389,6 +16390,8 @@ fn analyzeArithmetic( }; try sema.requireRuntimeBlock(block, src, runtime_src); + try sema.checkLogicalPtrOperation(block, src, lhs_ty); + try sema.checkLogicalPtrOperation(block, src, rhs_ty); const lhs_int = try block.addBitCast(.usize, lhs); const rhs_int = try block.addBitCast(.usize, rhs); const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int); @@ -16620,24 +16623,7 @@ fn analyzePtrArithmetic( }; try sema.requireRuntimeBlock(block, op_src, runtime_src); - - const target = zcu.getTarget(); - if (target_util.arePointersLogical(target, ptr_info.flags.address_space)) { - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(op_src, "illegal pointer arithmetic on pointer of type '{}'", .{ptr_ty.fmt(pt)}); - errdefer msg.destroy(sema.gpa); - - const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm); - try sema.errNote(op_src, msg, "arithmetic cannot be performed on pointers with address space '{s}' on target {s}-{s} by compiler backend {s}", .{ - @tagName(ptr_info.flags.address_space), - target.cpu.arch.genericName(), - @tagName(target.os.tag), - @tagName(backend), - }); - - break :msg msg; - }); - } + try sema.checkLogicalPtrOperation(block, op_src, ptr_ty); return block.addInst(.{ .tag = air_tag, @@ -22501,6 +22487,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! }); } try sema.requireRuntimeBlock(block, src, operand_src); + try sema.checkLogicalPtrOperation(block, src, ptr_ty); if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) { if (!ptr_ty.isAllowzeroPtr(zcu)) { @@ -23165,8 +23152,9 @@ fn ptrCastFull( try sema.validateRuntimeValue(block, operand_src, operand); - const need_null_check = block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu); - const need_align_check = block.wantSafety() and dest_align.compare(.gt, src_align); + const can_cast_to_int = !target_util.arePointersLogical(zcu.getTarget(), operand_ty.ptrAddressSpace(zcu)); + const need_null_check = can_cast_to_int and block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu); + const need_align_check = can_cast_to_int and block.wantSafety() and dest_align.compare(.gt, src_align); // `operand` might be a slice. If `need_operand_ptr`, we'll populate `operand_ptr` with the raw pointer. 
const need_operand_ptr = src_info.flags.size != .slice or // we already have it @@ -23832,6 +23820,32 @@ fn checkPtrType( return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)}); } +fn checkLogicalPtrOperation(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { + const pt = sema.pt; + const zcu = pt.zcu; + if (zcu.intern_pool.indexToKey(ty.toIntern()) == .ptr_type) { + const target = zcu.getTarget(); + const as = ty.ptrAddressSpace(zcu); + if (target_util.arePointersLogical(target, as)) { + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(src, "illegal operation on logical pointer of type '{}'", .{ty.fmt(pt)}); + errdefer msg.destroy(sema.gpa); + try sema.errNote( + src, + msg, + "cannot perform arithmetic on pointers with address space '{s}' on target {s}-{s}", + .{ + @tagName(as), + target.cpu.arch.genericName(), + @tagName(target.os.tag), + }, + ); + break :msg msg; + }); + } + } +} + fn checkVectorElemType( sema: *Sema, block: *Block, @@ -28326,7 +28340,7 @@ fn elemPtr( .pointer => indexable_ptr_ty.childType(zcu), else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}), }; - try checkIndexable(sema, block, src, indexable_ty); + try sema.checkIndexable(block, src, indexable_ty); const elem_ptr = switch (indexable_ty.zigTypeTag(zcu)) { .array, .vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), @@ -28362,7 +28376,7 @@ fn elemPtrOneLayerOnly( const pt = sema.pt; const zcu = pt.zcu; - try checkIndexable(sema, block, src, indexable_ty); + try sema.checkIndexable(block, src, indexable_ty); switch (indexable_ty.ptrSize(zcu)) { .slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), @@ -28376,6 +28390,8 @@ fn elemPtrOneLayerOnly( const elem_ptr = try ptr_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr.toIntern()); } + + try sema.checkLogicalPtrOperation(block, src, indexable_ty); const result_ty = try indexable_ty.elemPtrType(null, pt); return block.addPtrElemPtr(indexable, elem_index, result_ty); @@ -28412,7 +28428,7 @@ fn elemVal( const pt = sema.pt; const zcu = pt.zcu; - try checkIndexable(sema, block, src, indexable_ty); + try sema.checkIndexable(block, src, indexable_ty); // TODO in case of a vector of pointers, we need to detect whether the element // index is a scalar or vector instead of unconditionally casting to usize. 
@@ -28438,6 +28454,7 @@ fn elemVal( return Air.internedToRef((try pt.getCoerced(elem_val, elem_ty)).toIntern()); } + try sema.checkLogicalPtrOperation(block, src, indexable_ty); return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .one => { @@ -28477,6 +28494,9 @@ fn validateRuntimeElemAccess( parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { + const pt = sema.pt; + const zcu = pt.zcu; + if (try elem_ty.comptimeOnlySema(sema.pt)) { const msg = msg: { const msg = try sema.errMsg( @@ -28492,6 +28512,14 @@ fn validateRuntimeElemAccess( }; return sema.failWithOwnedErrorMsg(block, msg); } + + if (zcu.intern_pool.indexToKey(parent_ty.toIntern()) == .ptr_type) { + const target = zcu.getTarget(); + const as = parent_ty.ptrAddressSpace(zcu); + if (target_util.arePointersLogical(target, as)) { + return sema.fail(block, elem_index_src, "cannot access element of logical pointer '{}'", .{parent_ty.fmt(pt)}); + } + } } fn tupleFieldPtr( @@ -31158,6 +31186,7 @@ fn coerceCompatiblePtrs( if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(zcu) and (try dest_ty.elemType2(zcu).hasRuntimeBitsSema(pt) or dest_ty.elemType2(zcu).zigTypeTag(zcu) == .@"fn")) { + try sema.checkLogicalPtrOperation(block, inst_src, inst_ty); const actual_ptr = if (inst_ty.isSlice(zcu)) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) else diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index aeede443bf..3a00a05823 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -464,7 +464,7 @@ const NavGen = struct { const zcu = self.pt.zcu; const ty = Type.fromInterned(zcu.intern_pool.typeOf(val)); - const decl_ptr_ty_id = try self.ptrType(ty, .Generic, .indirect); + const decl_ptr_ty_id = try self.ptrType(ty, self.spvStorageClass(.generic), .indirect); const spv_decl_index = blk: { const entry = try self.object.uav_link.getOrPut(self.object.gpa, .{ val, .Function }); @@ -4230,7 +4230,7 @@ const NavGen = struct { defer self.gpa.free(ids); const result_id = self.spv.allocId(); - if (self.spv.hasFeature(.kernel)) { + if (self.spv.hasFeature(.addresses)) { try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{ .id_result_type = result_ty_id, .id_result = result_id, @@ -5293,7 +5293,7 @@ const NavGen = struct { /// The final storage class of the pointer. This may be either `.Generic` or `.Function`. /// In either case, the local is allocated in the `.Function` storage class, and optionally /// cast back to `.Generic`. - storage_class: StorageClass = .Generic, + storage_class: StorageClass, }; // Allocate a function-local variable, with possible initializer. 
@@ -5333,9 +5333,10 @@ const NavGen = struct { fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const zcu = self.pt.zcu; const ptr_ty = self.typeOfIndex(inst); - assert(ptr_ty.ptrAddressSpace(zcu) == .generic); const child_ty = ptr_ty.childType(zcu); - return try self.alloc(child_ty, .{}); + return try self.alloc(child_ty, .{ + .storage_class = self.spvStorageClass(ptr_ty.ptrAddressSpace(zcu)), + }); } fn airArg(self: *NavGen) IdRef { diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig index c11fa7cb25..1a07eea95f 100644 --- a/test/behavior/globals.zig +++ b/test/behavior/globals.zig @@ -69,6 +69,8 @@ test "global loads can affect liveness" { } test "global const can be self-referential" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + const S = struct { self: *const @This(), x: u32, @@ -113,6 +115,8 @@ test "global var can be self-referential" { } test "global const can be indirectly self-referential" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + const S = struct { other: *const @This(), x: u32, diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig index 73ecefddb2..564a3c9614 100644 --- a/test/behavior/ptrfromint.zig +++ b/test/behavior/ptrfromint.zig @@ -3,6 +3,8 @@ const builtin = @import("builtin"); const expectEqual = std.testing.expectEqual; test "casting integer address to function pointer" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + addressToFunction(); comptime addressToFunction(); } diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 82fae00a99..099dfd41e6 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -233,6 +233,8 @@ test "@sizeOf comparison against zero" { } test "hardcoded address in typeof expression" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + const S = struct { fn func() @TypeOf(@as(*[]u8, @ptrFromInt(0x10)).*[0]) { return 0; diff --git a/test/cases/compile_errors/illegal_operation_on_logical_ptr.zig b/test/cases/compile_errors/illegal_operation_on_logical_ptr.zig new file mode 100644 index 0000000000..d75e7dbd05 --- /dev/null +++ b/test/cases/compile_errors/illegal_operation_on_logical_ptr.zig @@ -0,0 +1,52 @@ +export fn elemPtr() void { + var ptr: [*]u8 = undefined; + ptr[0] = 0; +} + +export fn elemVal() void { + var ptr: [*]u8 = undefined; + var val = ptr[0]; + _ = &ptr; + _ = &val; +} + +export fn intFromPtr() void { + var value: u8 = 0; + _ = @intFromPtr(&value); +} + +export fn ptrFromInt() void { + var v: u32 = 0x1234; + var ptr: *u8 = @ptrFromInt(v); + _ = &v; + _ = &ptr; +} + +export fn ptrPtrArithmetic() void { + var value0: u8 = 0; + var value1: u8 = 0; + _ = &value0 - &value1; +} + +export fn ptrIntArithmetic() void { + var ptr0: [*]u8 = undefined; + _ = &ptr0; + _ = ptr0 - 10; +} + +// error +// backend=stage2 +// target=spirv64-vulkan +// +// :3:8: error: illegal operation on logical pointer of type '[*]u8' +// :3:8: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan +// :8:18: error: illegal operation on logical pointer of type '[*]u8' +// :8:18: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan +// :15:21: error: illegal operation on logical pointer of type '*u8' +// :15:21: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan +// :20:20: error: illegal operation on logical 
pointer of type '*u8' +// :20:20: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan +// :28:17: error: illegal operation on logical pointer of type '*u8' +// :28:17: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan +// :34:14: error: illegal operation on logical pointer of type '[*]u8' +// :34:14: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan -- cgit v1.2.3 From 78ad866dd1fc49cb826215002274991201cbb89e Mon Sep 17 00:00:00 2001 From: Ali Cheraghi Date: Sun, 9 Mar 2025 06:42:38 +0330 Subject: target: split `addresses` and `physical_storage_buffer` features --- lib/std/Target/spirv.zig | 20 +++++++++++++------- src/codegen/spirv/Module.zig | 11 +++++------ 2 files changed, 18 insertions(+), 13 deletions(-) (limited to 'src/codegen') diff --git a/lib/std/Target/spirv.zig b/lib/std/Target/spirv.zig index 6657bfd971..f45f9a6358 100644 --- a/lib/std/Target/spirv.zig +++ b/lib/std/Target/spirv.zig @@ -15,13 +15,14 @@ pub const Feature = enum { int64, float16, float64, - addresses, matrix, storage_push_constant16, kernel, + addresses, generic_pointer, vector16, shader, + physical_storage_buffer, }; pub const featureSet = CpuFeature.FeatureSetFns(Feature).featureSet; @@ -94,11 +95,6 @@ pub const all_features = blk: { .description = "Enable Float64 capability", .dependencies = featureSet(&[_]Feature{.v1_0}), }; - result[@intFromEnum(Feature.addresses)] = .{ - .llvm_name = null, - .description = "Enable either the Addresses capability or, SPV_KHR_physical_storage_buffer extension and the PhysicalStorageBufferAddresses capability", - .dependencies = featureSet(&[_]Feature{.v1_0}), - }; result[@intFromEnum(Feature.matrix)] = .{ .llvm_name = null, .description = "Enable Matrix capability", @@ -114,6 +110,11 @@ pub const all_features = blk: { .description = "Enable Kernel capability", .dependencies = featureSet(&[_]Feature{.v1_0}), }; + result[@intFromEnum(Feature.addresses)] = .{ + .llvm_name = null, + .description = "Enable Addresses capability", + .dependencies = featureSet(&[_]Feature{.v1_0}), + }; result[@intFromEnum(Feature.generic_pointer)] = .{ .llvm_name = null, .description = "Enable GenericPointer capability", @@ -129,6 +130,11 @@ pub const all_features = blk: { .description = "Enable Shader capability", .dependencies = featureSet(&[_]Feature{ .v1_0, .matrix }), }; + result[@intFromEnum(Feature.physical_storage_buffer)] = .{ + .llvm_name = null, + .description = "Enable SPV_KHR_physical_storage_buffer extension and the PhysicalStorageBufferAddresses capability", + .dependencies = featureSet(&[_]Feature{.v1_0}), + }; const ti = @typeInfo(Feature); for (&result, 0..) 
|*elem, i| { elem.index = i; @@ -147,7 +153,7 @@ pub const cpu = struct { pub const vulkan_v1_2: CpuModel = .{ .name = "vulkan_v1_2", .llvm_name = null, - .features = featureSet(&[_]Feature{ .v1_5, .shader, .addresses }), + .features = featureSet(&[_]Feature{ .v1_5, .shader, .physical_storage_buffer }), }; pub const opencl_v2: CpuModel = .{ diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 5ed1e2df1a..f7d32ba178 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -343,18 +343,17 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word { try self.addExtension("SPV_KHR_16bit_storage"); try self.addCapability(.StoragePushConstant16); }, - .addresses => if (self.hasFeature(.shader)) { - try self.addExtension("SPV_KHR_physical_storage_buffer"); - try self.addCapability(.PhysicalStorageBufferAddresses); - } else { - try self.addCapability(.Addresses); - }, + .addresses => try self.addCapability(.Addresses), // Kernel .kernel => try self.addCapability(.Kernel), .generic_pointer => try self.addCapability(.GenericPointer), .vector16 => try self.addCapability(.Vector16), // Shader .shader => try self.addCapability(.Shader), + .physical_storage_buffer => { + try self.addExtension("SPV_KHR_physical_storage_buffer"); + try self.addCapability(.PhysicalStorageBufferAddresses); + }, } } } -- cgit v1.2.3 From e2e75774748591fb44bfc905080e7a14008d4ec3 Mon Sep 17 00:00:00 2001 From: Ali Cheraghi Date: Sun, 9 Mar 2025 18:02:46 +0330 Subject: spirv: lower more types in assembler --- lib/std/gpu.zig | 15 +++++++---- src/codegen/spirv/Assembler.zig | 56 +++++++++++++++++++++++++++++++++-------- 2 files changed, 55 insertions(+), 16 deletions(-) (limited to 'src/codegen') diff --git a/lib/std/gpu.zig b/lib/std/gpu.zig index b9ad2fcda0..d02b2424d4 100644 --- a/lib/std/gpu.zig +++ b/lib/std/gpu.zig @@ -80,7 +80,8 @@ pub fn fragmentDepth(comptime ptr: *addrspace(.output) f32) void { /// Forms the main linkage for `input` and `output` address spaces. /// `ptr` must be a reference to variable or struct field. pub fn location(comptime ptr: anytype, comptime loc: u32) void { - asm volatile ("OpDecorate %ptr Location $loc" + asm volatile ( + \\OpDecorate %ptr Location $loc : : [ptr] "" (ptr), [loc] "c" (loc), @@ -110,7 +111,8 @@ pub const Origin = enum(u32) { /// The coordinates appear to originate in the specified `origin`. /// Only valid with the `Fragment` calling convention. pub fn fragmentOrigin(comptime entry_point: anytype, comptime origin: Origin) void { - asm volatile ("OpExecutionMode %entry_point $origin" + asm volatile ( + \\OpExecutionMode %entry_point $origin : : [entry_point] "" (entry_point), [origin] "c" (@intFromEnum(origin)), @@ -137,7 +139,8 @@ pub const DepthMode = enum(u32) { /// Only valid with the `Fragment` calling convention. pub fn depthMode(comptime entry_point: anytype, comptime mode: DepthMode) void { - asm volatile ("OpExecutionMode %entry_point $mode" + asm volatile ( + \\OpExecutionMode %entry_point $mode : : [entry_point] "" (entry_point), [mode] "c" (mode), @@ -147,7 +150,8 @@ pub fn depthMode(comptime entry_point: anytype, comptime mode: DepthMode) void { /// Indicates the workgroup size in the `x`, `y`, and `z` dimensions. /// Only valid with the `GLCompute` or `Kernel` calling conventions. 
pub fn workgroupSize(comptime entry_point: anytype, comptime size: @Vector(3, u32)) void { - asm volatile ("OpExecutionMode %entry_point LocalSize %x %y %z" + asm volatile ( + \\OpExecutionMode %entry_point LocalSize %x %y %z : : [entry_point] "" (entry_point), [x] "c" (size[0]), @@ -159,7 +163,8 @@ pub fn workgroupSize(comptime entry_point: anytype, comptime size: @Vector(3, u3 /// A hint to the client, which indicates the workgroup size in the `x`, `y`, and `z` dimensions. /// Only valid with the `GLCompute` or `Kernel` calling conventions. pub fn workgroupSizeHint(comptime entry_point: anytype, comptime size: @Vector(3, u32)) void { - asm volatile ("OpExecutionMode %entry_point LocalSizeHint %x %y %z" + asm volatile ( + \\OpExecutionMode %entry_point LocalSizeHint %x %y %z : : [entry_point] "" (entry_point), [x] "c" (size[0]), diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig index 25a5481fb5..264613b240 100644 --- a/src/codegen/spirv/Assembler.zig +++ b/src/codegen/spirv/Assembler.zig @@ -368,6 +368,40 @@ fn processTypeInstruction(self: *Assembler) !AsmValue { }); break :blk result_id; }, + .OpTypeStruct => blk: { + const ids = try self.gpa.alloc(IdRef, operands[1..].len); + defer self.gpa.free(ids); + for (operands[1..], ids) |op, *id| id.* = try self.resolveRefId(op.ref_id); + const result_id = self.spv.allocId(); + try self.spv.structType(result_id, ids, null); + break :blk result_id; + }, + .OpTypeImage => blk: { + const sampled_type = try self.resolveRefId(operands[1].ref_id); + const result_id = self.spv.allocId(); + try section.emit(self.gpa, .OpTypeImage, .{ + .id_result = result_id, + .sampled_type = sampled_type, + .dim = @enumFromInt(operands[2].value), + .depth = operands[3].literal32, + .arrayed = operands[4].literal32, + .ms = operands[5].literal32, + .sampled = operands[6].literal32, + .image_format = @enumFromInt(operands[7].value), + }); + break :blk result_id; + }, + .OpTypeSampler => blk: { + const result_id = self.spv.allocId(); + try section.emit(self.gpa, .OpTypeSampler, .{ .id_result = result_id }); + break :blk result_id; + }, + .OpTypeSampledImage => blk: { + const image_type = try self.resolveRefId(operands[1].ref_id); + const result_id = self.spv.allocId(); + try section.emit(self.gpa, .OpTypeSampledImage, .{ .id_result = result_id, .image_type = image_type }); + break :blk result_id; + }, .OpTypeFunction => blk: { const param_operands = operands[2..]; const return_type = try self.resolveRefId(operands[1].ref_id); @@ -406,18 +440,18 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue { else => switch (self.inst.opcode) { .OpEntryPoint => unreachable, .OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes, - .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) { - .Function => &self.func.prologue, - .Input, .Output => section: { - maybe_spv_decl_index = try self.spv.allocDecl(.global); - try self.func.decl_deps.put(self.spv.gpa, maybe_spv_decl_index.?, {}); - // TODO: In theory this can be non-empty if there is an initializer which depends on another global... 
-                try self.spv.declareDeclDeps(maybe_spv_decl_index.?, &.{});
+            .OpVariable => section: {
+                const storage_class: spec.StorageClass = @enumFromInt(operands[2].value);
+                if (storage_class == .Function) break :section &self.func.prologue;
+                maybe_spv_decl_index = try self.spv.allocDecl(.global);
+                if (self.spv.version.minor < 4 and storage_class != .Input and storage_class != .Output) {
+                    // Before version 1.4, the interface’s storage classes are limited to the Input and Output storage classes.
                     break :section &self.spv.sections.types_globals_constants;
-            },
-            // These don't need to be marked in the dependency system.
-            // Probably we should add them anyway, then filter out PushConstant globals.
-            else => &self.spv.sections.types_globals_constants,
+                }
+                try self.func.decl_deps.put(self.spv.gpa, maybe_spv_decl_index.?, {});
+                // TODO: In theory this can be non-empty if there is an initializer which depends on another global...
+                try self.spv.declareDeclDeps(maybe_spv_decl_index.?, &.{});
+                break :section &self.spv.sections.types_globals_constants;
             },
             // Default case - to be worked out further.
             else => &self.func.body,
-- cgit v1.2.3


From 50539a2447c0720f91789063d7349bd0103de4bd Mon Sep 17 00:00:00 2001
From: Ali Cheraghi
Date: Tue, 11 Mar 2025 00:49:16 +0330
Subject: spirv/target: arbitrary_precision_integers feature support

---
 lib/std/Target/spirv.zig     | 6 ++++++
 src/codegen/spirv.zig        | 4 ++--
 src/codegen/spirv/Module.zig | 4 ++++
 test/behavior/vector.zig     | 2 --
 4 files changed, 12 insertions(+), 4 deletions(-)

(limited to 'src/codegen')

diff --git a/lib/std/Target/spirv.zig b/lib/std/Target/spirv.zig
index f45f9a6358..ca83ad2d26 100644
--- a/lib/std/Target/spirv.zig
+++ b/lib/std/Target/spirv.zig
@@ -17,6 +17,7 @@ pub const Feature = enum {
     float64,
     matrix,
     storage_push_constant16,
+    arbitrary_precision_integers,
     kernel,
     addresses,
     generic_pointer,
@@ -105,6 +106,11 @@ pub const all_features = blk: {
         .description = "Enable SPV_KHR_16bit_storage extension and the StoragePushConstant16 capability",
         .dependencies = featureSet(&[_]Feature{.v1_3}),
     };
+    result[@intFromEnum(Feature.arbitrary_precision_integers)] = .{
+        .llvm_name = null,
+        .description = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
+        .dependencies = featureSet(&[_]Feature{ .v1_5, .int8, .int16 }),
+    };
     result[@intFromEnum(Feature.kernel)] = .{
         .llvm_name = null,
         .description = "Enable Kernel capability",
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 3a00a05823..7fcbc66e82 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -581,13 +581,13 @@ const NavGen = struct {
     /// that size. In this case, multiple elements of the largest type should be used.
     /// The backing type will be chosen as the smallest supported integer larger or equal to it in number of bits.
     /// The result is valid to be used with OpTypeInt.
-    /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
-    /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers).
    /// TODO: Should the result of this function be cached?
     fn backingIntBits(self: *NavGen, bits: u16) ?u16 {
         // The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
assert(bits != 0); + if (self.spv.hasFeature(.arbitrary_precision_integers) and bits <= 32) return bits; + // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. // 32-bit integers are always supported (see spec, 2.16.1, Data rules). const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{ diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index f7d32ba178..6dc0e5842a 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -343,6 +343,10 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word { try self.addExtension("SPV_KHR_16bit_storage"); try self.addCapability(.StoragePushConstant16); }, + .arbitrary_precision_integers => { + try self.addExtension("SPV_INTEL_arbitrary_precision_integers"); + try self.addCapability(.ArbitraryPrecisionIntegersINTEL); + }, .addresses => try self.addCapability(.Addresses), // Kernel .kernel => try self.addCapability(.Kernel), diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index df372be1ad..2d4c92d6fd 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -11,7 +11,6 @@ test "implicit cast vector to array - bool" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -30,7 +29,6 @@ test "vector wrap operators" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; -- cgit v1.2.3 From 54c097f50ddc794dc2b3890490379ab2f8371443 Mon Sep 17 00:00:00 2001 From: Ali Cheraghi Date: Wed, 12 Mar 2025 07:38:50 +0330 Subject: spirv: packed struct init + field val access --- src/codegen/spirv.zig | 142 +++++++++++++++------ src/codegen/spirv/Module.zig | 11 ++ test/behavior/bitcast.zig | 2 - test/behavior/packed-struct.zig | 14 -- .../packed_struct_explicit_backing_int.zig | 1 - test/behavior/ptrcast.zig | 2 - test/behavior/struct.zig | 3 - test/behavior/vector.zig | 2 + 8 files changed, 116 insertions(+), 61 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 7fcbc66e82..f57f5c0074 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -714,6 +714,7 @@ const NavGen = struct { const int_info = scalar_ty.intInfo(zcu); // Use backing bits so that negatives are sign extended const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int + assert(backing_bits != 0); // u0 is comptime const signedness: Signedness = switch (@typeInfo(@TypeOf(value))) { .int => |int| int.signedness, @@ -721,35 +722,35 @@ const NavGen = struct { else => unreachable, }; - const value64: u64 = switch (signedness) { - .signed => @bitCast(@as(i64, @intCast(value))), - .unsigned => @as(u64, @intCast(value)), - }; + const final_value: spec.LiteralContextDependentNumber = blk: { + if (self.spv.hasFeature(.kernel)) { + const value64: u64 = switch (signedness) { + .signed => @bitCast(@as(i64, @intCast(value))), + 
.unsigned => @as(u64, @intCast(value)), + }; - // Manually truncate the value to the right amount of bits. - const truncated_value = if (backing_bits == 64) - value64 - else - value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1; + // Manually truncate the value to the right amount of bits. + const truncated_value = if (backing_bits == 64) + value64 + else + value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1; - const result_ty_id = try self.resolveType(scalar_ty, .indirect); - const result_id = self.spv.allocId(); + break :blk switch (backing_bits) { + 1...32 => .{ .uint32 = @truncate(truncated_value) }, + 33...64 => .{ .uint64 = truncated_value }, + else => unreachable, // TODO: Large integer constants + }; + } - const section = &self.spv.sections.types_globals_constants; - switch (backing_bits) { - 0 => unreachable, // u0 is comptime - 1...32 => try section.emit(self.spv.gpa, .OpConstant, .{ - .id_result_type = result_ty_id, - .id_result = result_id, - .value = .{ .uint32 = @truncate(truncated_value) }, - }), - 33...64 => try section.emit(self.spv.gpa, .OpConstant, .{ - .id_result_type = result_ty_id, - .id_result = result_id, - .value = .{ .uint64 = truncated_value }, - }), - else => unreachable, // TODO: Large integer constants - } + break :blk switch (backing_bits) { + 1...32 => if (signedness == .signed) .{ .int32 = @intCast(value) } else .{ .uint32 = @intCast(value) }, + 33...64 => if (signedness == .signed) .{ .int64 = value } else .{ .uint64 = value }, + else => unreachable, // TODO: Large integer constants + }; + }; + + const result_ty_id = try self.resolveType(scalar_ty, .indirect); + const result_id = try self.spv.constant(result_ty_id, final_value); if (!ty.isVector(zcu)) return result_id; return self.constructCompositeSplat(ty, result_id); @@ -804,8 +805,6 @@ const NavGen = struct { return self.spv.constUndef(result_ty_id); } - const section = &self.spv.sections.types_globals_constants; - const cacheable_id = cache: { switch (ip.indexToKey(val.toIntern())) { .int_type, @@ -860,13 +859,7 @@ const NavGen = struct { 80, 128 => unreachable, // TODO else => unreachable, }; - const result_id = self.spv.allocId(); - try section.emit(self.spv.gpa, .OpConstant, .{ - .id_result_type = result_ty_id, - .id_result = result_id, - .value = lit, - }); - break :cache result_id; + break :cache try self.spv.constant(result_ty_id, lit); }, .err => |err| { const value = try pt.getErrorValue(err.name); @@ -989,8 +982,17 @@ const NavGen = struct { }, .struct_type => { const struct_type = zcu.typeToStruct(ty).?; + if (struct_type.layout == .@"packed") { - return self.todo("packed struct constants", .{}); + // TODO: composite int + // TODO: endianness + const bits: u16 = @intCast(ty.bitSize(zcu)); + const bytes = std.mem.alignForward(u16, self.backingIntBits(bits).?, 8) / 8; + var limbs: [8]u8 = undefined; + @memset(&limbs, 0); + val.writeToPackedMemory(ty, pt, limbs[0..bytes], 0) catch unreachable; + const backing_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)); + return try self.constInt(backing_ty, @as(u64, @bitCast(limbs))); } var types = std.ArrayList(Type).init(self.gpa); @@ -4309,6 +4311,7 @@ const NavGen = struct { ) !Temporary { const pt = self.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const scalar_ty = lhs.ty.scalarType(zcu); const is_vector = lhs.ty.isVector(zcu); @@ -4319,6 +4322,11 @@ const NavGen = struct { const ty = lhs.ty.intTagType(zcu); return try self.cmp(op, lhs.pun(ty), rhs.pun(ty)); }, + .@"struct" => { + const struct_ty = 
zcu.typeToPackedStruct(scalar_ty).?; + const ty = Type.fromInterned(struct_ty.backingIntTypeUnordered(ip)); + return try self.cmp(op, lhs.pun(ty), rhs.pun(ty)); + }, .error_set => { assert(!is_vector); const err_int_ty = try pt.errorIntType(); @@ -4746,8 +4754,42 @@ const NavGen = struct { switch (result_ty.zigTypeTag(zcu)) { .@"struct" => { if (zcu.typeToPackedStruct(result_ty)) |struct_type| { - _ = struct_type; - unreachable; // TODO + comptime assert(Type.packed_struct_layout_version == 2); + const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)); + var running_int_id = try self.constInt(backing_int_ty, 0); + var running_bits: u16 = 0; + for (struct_type.field_types.get(ip), elements) |field_ty_ip, element| { + const field_ty = Type.fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + const field_id = try self.resolve(element); + const ty_bit_size: u16 = @intCast(field_ty.bitSize(zcu)); + const field_int_ty = try self.pt.intType(.unsigned, ty_bit_size); + const field_int_id = blk: { + if (field_ty.isPtrAtRuntime(zcu)) { + assert(self.spv.hasFeature(.addresses) or + (self.spv.hasFeature(.physical_storage_buffer) and field_ty.ptrAddressSpace(zcu) == .storage_buffer)); + break :blk try self.intFromPtr(field_id); + } + break :blk try self.bitCast(field_int_ty, field_ty, field_id); + }; + const shift_rhs = try self.constInt(backing_int_ty, running_bits); + const extended_int_conv = try self.buildIntConvert(backing_int_ty, .{ + .ty = field_int_ty, + .value = .{ .singleton = field_int_id }, + }); + const shifted = try self.buildBinary(.sll, extended_int_conv, .{ + .ty = backing_int_ty, + .value = .{ .singleton = shift_rhs }, + }); + const running_int_tmp = try self.buildBinary( + .bit_or, + .{ .ty = backing_int_ty, .value = .{ .singleton = running_int_id } }, + shifted, + ); + running_int_id = try running_int_tmp.materialize(self); + running_bits += ty_bit_size; + } + return running_int_id; } const types = try self.gpa.alloc(Type, elements.len); @@ -5156,6 +5198,7 @@ const NavGen = struct { fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5168,7 +5211,28 @@ const NavGen = struct { switch (object_ty.zigTypeTag(zcu)) { .@"struct" => switch (object_ty.containerLayout(zcu)) { - .@"packed" => unreachable, // TODO + .@"packed" => { + const struct_ty = zcu.typeToPackedStruct(object_ty).?; + const backing_int_ty = Type.fromInterned(struct_ty.backingIntTypeUnordered(ip)); + const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index); + const bit_offset_id = try self.constInt(.u16, bit_offset); + const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned; + const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu)); + const int_ty = try pt.intType(signedness, field_bit_size); + const shift_lhs: Temporary = .{ .ty = backing_int_ty, .value = .{ .singleton = object_id } }; + const shift = try self.buildBinary(.srl, shift_lhs, .{ .ty = .u16, .value = .{ .singleton = bit_offset_id } }); + const mask_id = try self.constInt(backing_int_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1); + const masked = try self.buildBinary(.bit_and, shift, .{ .ty = backing_int_ty, .value = .{ .singleton = mask_id } }); + const result_id = blk: { + if 
(self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(backing_int_ty.bitSize(zcu))).?) + break :blk try self.bitCast(int_ty, backing_int_ty, try masked.materialize(self)); + const trunc = try self.buildIntConvert(int_ty, masked); + break :blk try trunc.materialize(self); + }; + if (field_ty.ip_index == .bool_type) return try self.convertToDirect(.bool, result_id); + if (field_ty.isInt(zcu)) return result_id; + return try self.bitCast(field_ty, int_ty, result_id); + }, else => return try self.extractField(field_ty, object_id, field_index), }, .@"union" => switch (object_ty.containerLayout(zcu)) { diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 6dc0e5842a..1acdc0915c 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -613,6 +613,17 @@ pub fn functionType(self: *Module, return_ty_id: IdRef, param_type_ids: []const return result_id; } +pub fn constant(self: *Module, result_ty_id: IdRef, value: spec.LiteralContextDependentNumber) !IdRef { + const result_id = self.allocId(); + const section = &self.sections.types_globals_constants; + try section.emit(self.gpa, .OpConstant, .{ + .id_result_type = result_ty_id, + .id_result = result_id, + .value = value, + }); + return result_id; +} + pub fn constBool(self: *Module, value: bool) !IdRef { if (self.cache.bool_const[@intFromBool(value)]) |b| return b; diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 6b5be9421b..36e6b264af 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -165,7 +165,6 @@ test "@bitCast packed structs at runtime and comptime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const Full = packed struct { number: u16, @@ -226,7 +225,6 @@ test "bitcast packed struct to integer and back" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const LevelUpMove = packed struct { move_id: u9, diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 666a7427e9..592d9fa2bd 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -123,7 +123,6 @@ test "correct sizeOf and offsets in packed structs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const PStruct = packed struct { bool_a: bool, @@ -191,7 +190,6 @@ test "nested packed structs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S1 = packed struct { a: u8, b: u8, c: u8 }; @@ -257,7 +255,6 @@ test "nested packed struct unaligned" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) 
return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet const S1 = packed struct { @@ -895,7 +892,6 @@ test "packed struct passed to callconv(.c) function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -944,7 +940,6 @@ test "packed struct initialized in bitcast" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = packed struct { val: u8 }; @@ -982,7 +977,6 @@ test "pointer to container level packed struct field" { test "store undefined to packed result location" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u4 = 0; @@ -992,8 +986,6 @@ test "store undefined to packed result location" { } test "bitcast back and forth" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - // Originally reported at https://github.com/ziglang/zig/issues/9914 const S = packed struct { one: u6, two: u1 }; const s = S{ .one = 0b110101, .two = 0b1 }; @@ -1290,8 +1282,6 @@ test "2-byte packed struct argument in C calling convention" { } test "packed struct contains optional pointer" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const foo: packed struct { a: ?*@This() = null, } = .{}; @@ -1299,8 +1289,6 @@ test "packed struct contains optional pointer" { } test "packed struct equality" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const Foo = packed struct { a: u4, b: u4, @@ -1321,8 +1309,6 @@ test "packed struct equality" { } test "packed struct with signed field" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - var s: packed struct { a: i2, b: u6, diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig index 29b8c4aa9b..c1bc2426d8 100644 --- a/test/behavior/packed_struct_explicit_backing_int.zig +++ b/test/behavior/packed_struct_explicit_backing_int.zig @@ -9,7 +9,6 @@ test "packed struct explicit backing integer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S1 = packed struct { a: u8, b: u8, c: u8 }; diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index 84a87bba9c..91955e3e94 100644 --- a/test/behavior/ptrcast.zig +++ 
b/test/behavior/ptrcast.zig @@ -287,8 +287,6 @@ test "@ptrCast undefined value at comptime" { } test "comptime @ptrCast with packed struct leaves value unmodified" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const S = packed struct { three: u3 }; const st: S = .{ .three = 6 }; try expect(st.three == 6); diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 367d11588f..27f56c7cba 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1023,7 +1023,6 @@ test "packed struct with undefined initializers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { const P = packed struct { @@ -1221,7 +1220,6 @@ test "packed struct aggregate init" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -1971,7 +1969,6 @@ test "struct field default value is a call" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const Z = packed struct { a: u32, diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 2d4c92d6fd..df372be1ad 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -11,6 +11,7 @@ test "implicit cast vector to array - bool" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -29,6 +30,7 @@ test "vector wrap operators" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; -- cgit v1.2.3 From d18eaf8586cf173d5605d5885fcbe26d64af00c5 Mon Sep 17 00:00:00 2001 From: Ali Cheraghi Date: Wed, 12 Mar 2025 16:52:03 +0330 Subject: spirv: aligned load for physical storage variables Resolves #23212 --- src/codegen/spirv.zig | 115 +++++++++++++++++++++++++++------------ test/behavior/cast_int.zig | 4 -- test/behavior/export_keyword.zig | 1 - test/behavior/packed-union.zig | 3 - test/behavior/union.zig | 14 +---- 5 files changed, 83 insertions(+), 54 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index f57f5c0074..899a8db7fe 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1024,6 +1024,11 @@ const NavGen = struct { else => 
unreachable, }, .un => |un| { + if (un.tag == .none) { + assert(ty.containerLayout(zcu) == .@"packed"); // TODO + const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))); + return try self.constant(int_ty, Value.fromInterned(un.val), .direct); + } const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?; const union_obj = zcu.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]); @@ -1356,7 +1361,7 @@ const NavGen = struct { const union_obj = zcu.typeToUnion(ty).?; if (union_obj.flagsUnordered(ip).layout == .@"packed") { - return self.todo("packed union types", .{}); + return try self.intType(.unsigned, @intCast(ty.bitSize(zcu))); } const layout = self.unionLayout(ty); @@ -3226,10 +3231,13 @@ const NavGen = struct { }; fn load(self: *NavGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef { + const zcu = self.pt.zcu; + const alignment: u32 = @intCast(value_ty.abiAlignment(zcu).toByteUnits().?); const indirect_value_ty_id = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ .Volatile = options.is_volatile, + .Aligned = .{ .literal_integer = alignment }, }; try self.func.body.emit(self.spv.gpa, .OpLoad, .{ .id_result_type = indirect_value_ty_id, @@ -5130,11 +5138,33 @@ const NavGen = struct { const union_ty = zcu.typeToUnion(ty).?; const tag_ty = Type.fromInterned(union_ty.enum_tag_ty); + const layout = self.unionLayout(ty); + const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]); + if (union_ty.flagsUnordered(ip).layout == .@"packed") { - unreachable; // TODO - } + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))); + return self.constInt(int_ty, 0); + } - const layout = self.unionLayout(ty); + assert(payload != null); + if (payload_ty.isInt(zcu)) { + if (ty.bitSize(zcu) == payload_ty.bitSize(zcu)) { + return self.bitCast(ty, payload_ty, payload.?); + } + + const trunc = try self.buildIntConvert(ty, .{ .ty = payload_ty, .value = .{ .singleton = payload.? } }); + return try trunc.materialize(self); + } + + const payload_int_ty = try pt.intType(.unsigned, @intCast(payload_ty.bitSize(zcu))); + const payload_int = if (payload_ty.ip_index == .bool_type) + try self.convertToIndirect(payload_ty, payload.?) 
+ else + try self.bitCast(payload_int_ty, payload_ty, payload.?); + const trunc = try self.buildIntConvert(ty, .{ .ty = payload_int_ty, .value = .{ .singleton = payload_int } }); + return try trunc.materialize(self); + } const tag_int = if (layout.tag_size != 0) blk: { const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field); @@ -5155,7 +5185,6 @@ const NavGen = struct { try self.store(tag_ty, ptr_id, tag_id, .{}); } - const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]); if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function, .indirect); const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index}); @@ -5198,7 +5227,6 @@ const NavGen = struct { fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const zcu = pt.zcu; - const ip = &zcu.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5213,16 +5241,39 @@ const NavGen = struct { .@"struct" => switch (object_ty.containerLayout(zcu)) { .@"packed" => { const struct_ty = zcu.typeToPackedStruct(object_ty).?; - const backing_int_ty = Type.fromInterned(struct_ty.backingIntTypeUnordered(ip)); const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index); const bit_offset_id = try self.constInt(.u16, bit_offset); const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned; const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu)); - const int_ty = try pt.intType(signedness, field_bit_size); - const shift_lhs: Temporary = .{ .ty = backing_int_ty, .value = .{ .singleton = object_id } }; + const field_int_ty = try pt.intType(signedness, field_bit_size); + const shift_lhs: Temporary = .{ .ty = object_ty, .value = .{ .singleton = object_id } }; const shift = try self.buildBinary(.srl, shift_lhs, .{ .ty = .u16, .value = .{ .singleton = bit_offset_id } }); + const mask_id = try self.constInt(object_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1); + const masked = try self.buildBinary(.bit_and, shift, .{ .ty = object_ty, .value = .{ .singleton = mask_id } }); + const result_id = blk: { + if (self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(object_ty.bitSize(zcu))).?) 
+ break :blk try self.bitCast(field_int_ty, object_ty, try masked.materialize(self)); + const trunc = try self.buildIntConvert(field_int_ty, masked); + break :blk try trunc.materialize(self); + }; + if (field_ty.ip_index == .bool_type) return try self.convertToDirect(.bool, result_id); + if (field_ty.isInt(zcu)) return result_id; + return try self.bitCast(field_ty, field_int_ty, result_id); + }, + else => return try self.extractField(field_ty, object_id, field_index), + }, + .@"union" => switch (object_ty.containerLayout(zcu)) { + .@"packed" => { + const backing_int_ty = try pt.intType(.unsigned, @intCast(object_ty.bitSize(zcu))); + const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned; + const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu)); + const int_ty = try pt.intType(signedness, field_bit_size); const mask_id = try self.constInt(backing_int_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1); - const masked = try self.buildBinary(.bit_and, shift, .{ .ty = backing_int_ty, .value = .{ .singleton = mask_id } }); + const masked = try self.buildBinary( + .bit_and, + .{ .ty = backing_int_ty, .value = .{ .singleton = object_id } }, + .{ .ty = backing_int_ty, .value = .{ .singleton = mask_id } }, + ); const result_id = blk: { if (self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(backing_int_ty.bitSize(zcu))).?) break :blk try self.bitCast(int_ty, backing_int_ty, try masked.materialize(self)); @@ -5233,10 +5284,6 @@ const NavGen = struct { if (field_ty.isInt(zcu)) return result_id; return try self.bitCast(field_ty, int_ty, result_id); }, - else => return try self.extractField(field_ty, object_id, field_index), - }, - .@"union" => switch (object_ty.containerLayout(zcu)) { - .@"packed" => unreachable, // TODO else => { // Store, ptr-elem-ptr, pointer-cast, load const layout = self.unionLayout(object_ty); @@ -5317,28 +5364,28 @@ const NavGen = struct { return try self.accessChain(result_ty_id, object_ptr, &.{field_index}); }, }, - .@"union" => switch (object_ty.containerLayout(zcu)) { - .@"packed" => return self.todo("implement field access for packed unions", .{}), - else => { - const layout = self.unionLayout(object_ty); - if (!layout.has_payload) { - // Asked to get a pointer to a zero-sized field. Just lower this - // to undefined, there is no reason to make it be a valid pointer. - return try self.spv.constUndef(result_ty_id); - } + .@"union" => { + const layout = self.unionLayout(object_ty); + if (!layout.has_payload) { + // Asked to get a pointer to a zero-sized field. Just lower this + // to undefined, there is no reason to make it be a valid pointer. 
+ return try self.spv.constUndef(result_ty_id); + } - const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu)); - const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class, .indirect); - const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index}); + const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu)); + const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class, .indirect); + const pl_ptr_id = blk: { + if (object_ty.containerLayout(zcu) == .@"packed") break :blk object_ptr; + break :blk try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index}); + }; - const active_pl_ptr_id = self.spv.allocId(); - try self.func.body.emit(self.spv.gpa, .OpBitcast, .{ - .id_result_type = result_ty_id, - .id_result = active_pl_ptr_id, - .operand = pl_ptr_id, - }); - return active_pl_ptr_id; - }, + const active_pl_ptr_id = self.spv.allocId(); + try self.func.body.emit(self.spv.gpa, .OpBitcast, .{ + .id_result_type = result_ty_id, + .id_result = active_pl_ptr_id, + .operand = pl_ptr_id, + }); + return active_pl_ptr_id; }, else => unreachable, } diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig index c6ecc9f20d..9cbcddc191 100644 --- a/test/behavior/cast_int.zig +++ b/test/behavior/cast_int.zig @@ -22,7 +22,6 @@ test "coerce i8 to i32 and @intCast back" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var x: i8 = -5; var y: i32 = -5; @@ -36,8 +35,6 @@ test "coerce i8 to i32 and @intCast back" { } test "coerce non byte-sized integers accross 32bits boundary" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - { var v: u21 = 6417; _ = &v; @@ -217,7 +214,6 @@ test "load non byte-sized value in union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; diff --git a/test/behavior/export_keyword.zig b/test/behavior/export_keyword.zig index 56a005f521..270dcda56f 100644 --- a/test/behavior/export_keyword.zig +++ b/test/behavior/export_keyword.zig @@ -25,7 +25,6 @@ const PackedUnion = packed union { test "packed struct, enum, union parameters in extern function" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; testPackedStuff(&(PackedStruct{ .a = 1, diff --git a/test/behavior/packed-union.zig b/test/behavior/packed-union.zig index b70a16f354..7181080704 100644 --- a/test/behavior/packed-union.zig +++ b/test/behavior/packed-union.zig @@ -137,7 +137,6 @@ test "packed union initialized with a runtime value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) 
return error.SkipZigTest; const Fields = packed struct { @@ -174,8 +173,6 @@ test "assigning to non-active field at comptime" { } test "comptime packed union of pointers" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const U = packed union { a: *const u32, b: *const [1]u32, diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 3449bb6f93..c4ac8ac458 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1372,14 +1372,13 @@ test "packed union in packed struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = packed struct { nested: packed union { - val: usize, + val: u16, foo: u32, }, - bar: u32, + bar: u16, fn unpack(self: @This()) usize { return self.nested.foo; @@ -1460,7 +1459,6 @@ test "packed union with zero-bit field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = packed struct { nested: packed union { @@ -1479,7 +1477,6 @@ test "packed union with zero-bit field" { test "reinterpreting enum value inside packed union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const U = packed union { tag: enum(u8) { a, b }, @@ -1527,7 +1524,6 @@ test "defined-layout union field pointer has correct alignment" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest(comptime U: type) !void { @@ -1901,8 +1897,6 @@ test "inner struct initializer uses union layout" { } test "inner struct initializer uses packed union layout" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const namespace = struct { const U = packed union { a: packed struct { @@ -1946,8 +1940,6 @@ test "extern union initialized via reintepreted struct field initializer" { } test "packed union initialized via reintepreted struct field initializer" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; const U = packed union { @@ -1988,8 +1980,6 @@ test "store of comptime reinterpreted memory to extern union" { } test "store of comptime reinterpreted memory to packed union" { - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; const U = packed union { -- cgit v1.2.3 From ee06b2ce760d927b62726de1e2e3cb33b48d4932 Mon Sep 17 00:00:00 2001 From: Ali Cheraghi Date: Thu, 13 Mar 2025 03:01:19 +0330 Subject: spirv: require int8/int16 capabilities --- lib/std/Target/spirv.zig | 14 +------------- src/codegen/spirv.zig | 8 ++++---- src/codegen/spirv/Module.zig | 5 +++-- test/tests.zig | 2 +- 4 files changed, 9 insertions(+), 20 deletions(-) 
(limited to 'src/codegen')

diff --git a/lib/std/Target/spirv.zig b/lib/std/Target/spirv.zig
index ca83ad2d26..a2575b2fe8 100644
--- a/lib/std/Target/spirv.zig
+++ b/lib/std/Target/spirv.zig
@@ -10,8 +10,6 @@ pub const Feature = enum {
     v1_4,
     v1_5,
     v1_6,
-    int8,
-    int16,
     int64,
     float16,
     float64,
@@ -71,16 +69,6 @@ pub const all_features = blk: {
         .description = "Enable version 1.6",
         .dependencies = featureSet(&[_]Feature{.v1_5}),
     };
-    result[@intFromEnum(Feature.int8)] = .{
-        .llvm_name = null,
-        .description = "Enable Int8 capability",
-        .dependencies = featureSet(&[_]Feature{.v1_0}),
-    };
-    result[@intFromEnum(Feature.int16)] = .{
-        .llvm_name = null,
-        .description = "Enable Int16 capability",
-        .dependencies = featureSet(&[_]Feature{.v1_0}),
-    };
     result[@intFromEnum(Feature.int64)] = .{
         .llvm_name = null,
         .description = "Enable Int64 capability",
@@ -109,7 +97,7 @@ pub const all_features = blk: {
     result[@intFromEnum(Feature.arbitrary_precision_integers)] = .{
         .llvm_name = null,
         .description = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
-        .dependencies = featureSet(&[_]Feature{ .v1_5, .int8, .int16 }),
+        .dependencies = featureSet(&[_]Feature{.v1_5}),
     };
     result[@intFromEnum(Feature.kernel)] = .{
         .llvm_name = null,
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 899a8db7fe..c361090b51 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -588,11 +588,11 @@ const NavGen = struct {
         if (self.spv.hasFeature(.arbitrary_precision_integers) and bits <= 32)
             return bits;
 
-        // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively.
+        // The Int8 and Int16 capabilities are always required; Int64 is used when available.
         // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
         const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
-            .{ .bits = 8, .feature = .int8 },
-            .{ .bits = 16, .feature = .int16 },
+            .{ .bits = 8, .feature = null },
+            .{ .bits = 16, .feature = null },
             .{ .bits = 32, .feature = null },
             .{ .bits = 64, .feature = .int64 },
         };
@@ -1373,7 +1373,7 @@ const NavGen = struct {
         var member_types: [4]IdRef = undefined;
         var member_names: [4][]const u8 = undefined;
 
-        const u8_ty_id = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled?
+        const u8_ty_id = try self.resolveType(Type.u8, .direct);
 
         if (layout.tag_size != 0) {
             const tag_ty_id = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 1acdc0915c..1aa082f6bc 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -333,8 +333,6 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
             // Versions
             .v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
             // Features with no dependencies
-            .int8 => try self.addCapability(.Int8),
-            .int16 => try self.addCapability(.Int16),
             .int64 => try self.addCapability(.Int64),
             .float16 => try self.addCapability(.Float16),
             .float64 => try self.addCapability(.Float64),
@@ -361,6 +359,9 @@
             }
         }
     }
+    // Int8 and Int16 are widely supported, so enable them unconditionally.
+    try self.addCapability(.Int8);
+    try self.addCapability(.Int16);
 
     // Emit memory model
     const addressing_model: spec.AddressingModel = blk: {
diff --git a/test/tests.zig b/test/tests.zig
index 28c6fbc67e..9224f97c7e 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -143,7 +143,7 @@ const test_targets = blk: {
     .{
         .target = std.Target.Query.parse(.{
             .arch_os_abi = "spirv64-vulkan",
-            .cpu_features = "vulkan_v1_2+int8+int16+int64+float16+float64",
+            .cpu_features = "vulkan_v1_2+int64+float16+float64",
        }) catch unreachable,
         .use_llvm = false,
         .use_lld = false,
--
cgit v1.2.3
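
A note on the packed-union lowering in the first commit above: every field of a packed union starts at bit 0 of the union's backing integer, so a field load reduces to masking off the field's low bits, and a field pointer reduces to reinterpreting the union pointer itself (which is why the packed case skips the access chain). The following is a minimal, self-contained Zig sketch of that masking step; `extractPackedField` is a hypothetical helper for illustration, not part of the compiler:

    const std = @import("std");

    /// Hypothetical helper mirroring the mask the backend builds:
    /// (1 << field_bits) - 1, applied to the union's backing integer.
    /// Assumes field_bits < 64 (the shift amount is a u6), matching the
    /// @as(u6, @intCast(...)) cast in the patch.
    fn extractPackedField(backing: u64, field_bits: u6) u64 {
        const mask = (@as(u64, 1) << field_bits) - 1;
        return backing & mask;
    }

    test "packed union field is the low bits of the backing integer" {
        const U = packed union {
            val: u16,
            foo: u32,
        };
        const u: U = .{ .foo = 0xDEADBEEF };
        // Both fields alias bit 0, so `val` is the low 16 bits of `foo`.
        try std.testing.expectEqual(@as(u16, 0xBEEF), u.val);
        try std.testing.expectEqual(@as(u64, 0xBEEF), extractPackedField(0xDEADBEEF, 16));
    }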
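
On the capability change in the last commit: 8- and 16-bit integer support is no longer modeled as target features, since the Int8 and Int16 capabilities are now emitted unconditionally, and only 64-bit integers remain feature-gated. Below is a rough sketch of the resulting backing-width selection; `has_int64` is a hypothetical stand-in for `self.spv.hasFeature(.int64)`, and the real `backingIntBits` additionally handles the arbitrary-precision-integers path:

    const std = @import("std");

    /// Pick the smallest supported SPIR-V backing width for an integer of
    /// `bits` bits. 8-, 16- and 32-bit widths are always available; 64-bit
    /// is available only when the int64 feature is enabled.
    fn backingIntBitsSketch(bits: u16, has_int64: bool) ?u16 {
        const widths = [_]u16{ 8, 16, 32, 64 };
        for (widths) |w| {
            if (w == 64 and !has_int64) continue;
            if (bits <= w) return w;
        }
        return null; // wider than any supported backing integer
    }

    test "backing width selection" {
        try std.testing.expectEqual(@as(?u16, 8), backingIntBitsSketch(5, false));
        try std.testing.expectEqual(@as(?u16, 64), backingIntBitsSketch(33, true));
        try std.testing.expectEqual(@as(?u16, null), backingIntBitsSketch(33, false));
    }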