| author | Andrew Kelley <andrew@ziglang.org> | 2022-03-22 00:23:54 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2022-03-22 15:45:58 -0700 |
| commit | 593130ce0a4b06185fcb4806f8330857a1da9f92 | (patch) |
| tree | ae30c61a06e6cecb97cc760dc6acd0cbfb83c162 | /src/codegen/spirv.zig |
| parent | b74f2924102fe06addb688dc5fd039dc2756f619 | (diff) |
stage2: lazy `@alignOf`
Add a `target` parameter to every function that deals with Type and
Value.
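
The change described above can be illustrated outside the compiler: layout questions such as `@alignOf` depend on the compilation target, so a routine that answers them has to be handed the target instead of assuming the host. Below is a minimal, hedged sketch using `std.Target` from the standard library of this era; `pointerAlignment` is a hypothetical helper invented for illustration, not part of the compiler's `Type`/`Value` API.

```zig
const std = @import("std");
const builtin = @import("builtin");

// Hypothetical helper (not the compiler's API): pointer alignment varies by
// target, so the target must be passed in explicitly. Simplification: this
// assumes pointer alignment equals pointer size, which holds for common targets.
fn pointerAlignment(target: std.Target) u16 {
    return target.cpu.arch.ptrBitWidth() / 8;
}

pub fn main() void {
    // The native target is available at compile time via @import("builtin").
    std.debug.print("native pointer alignment: {d}\n", .{pointerAlignment(builtin.target)});
}
```

In the diff below, the same pattern shows up as `Type.eql` and `Value.toUnsignedInt` gaining a `target` parameter, with `DeclGen` fetching it via `self.getTarget()`.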
Diffstat (limited to 'src/codegen/spirv.zig')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/codegen/spirv.zig | 18 |
1 file changed, 10 insertions, 8 deletions
```diff
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 0d9d1ae223..6072c59845 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -313,7 +313,7 @@ pub const DeclGen = struct {
             // As of yet, there is no vector support in the self-hosted compiler.
             .Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
             // TODO: For which types is this the case?
-            else => self.todo("implement arithmeticTypeInfo for {}", .{ty}),
+            else => self.todo("implement arithmeticTypeInfo for {}", .{ty.fmtDebug()}),
         };
     }
@@ -335,7 +335,7 @@ pub const DeclGen = struct {
         const int_info = ty.intInfo(target);
         const backing_bits = self.backingIntBits(int_info.bits) orelse {
             // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
-            return self.todo("implement composite int constants for {}", .{ty});
+            return self.todo("implement composite int constants for {}", .{ty.fmtDebug()});
         };
 
         // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
@@ -345,7 +345,7 @@ pub const DeclGen = struct {
         // Note, value is required to be sign-extended, so we don't need to mask off the upper bits.
         // See https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#Literal
-        var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
+        var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(target);
 
         const value: spec.LiteralContextDependentNumber = switch (backing_bits) {
             1...32 => .{ .uint32 = @truncate(u32, int_bits) },
@@ -388,7 +388,7 @@ pub const DeclGen = struct {
                 });
             },
             .Void => unreachable,
-            else => return self.todo("constant generation of type {}", .{ty}),
+            else => return self.todo("constant generation of type {}", .{ty.fmtDebug()}),
         }
 
         return result_id.toRef();
@@ -414,7 +414,7 @@ pub const DeclGen = struct {
         const backing_bits = self.backingIntBits(int_info.bits) orelse {
             // TODO: Integers too big for any native type are represented as "composite integers":
             // An array of largestSupportedIntBits.
-            return self.todo("Implement composite int type {}", .{ty});
+            return self.todo("Implement composite int type {}", .{ty.fmtDebug()});
         };
 
         const payload = try self.spv.arena.create(SpvType.Payload.Int);
@@ -644,8 +644,10 @@ pub const DeclGen = struct {
         const result_id = self.spv.allocId();
         const result_type_id = try self.resolveTypeId(ty);
 
-        assert(self.air.typeOf(bin_op.lhs).eql(ty));
-        assert(self.air.typeOf(bin_op.rhs).eql(ty));
+        const target = self.getTarget();
+
+        assert(self.air.typeOf(bin_op.lhs).eql(ty, target));
+        assert(self.air.typeOf(bin_op.rhs).eql(ty, target));
 
         // Binary operations are generally applicable to both scalar and vector operations
         // in SPIR-V, but int and float versions of operations require different opcodes.
@@ -692,7 +694,7 @@ pub const DeclGen = struct {
         const result_id = self.spv.allocId();
         const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
         const op_ty = self.air.typeOf(bin_op.lhs);
-        assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));
+        assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.getTarget()));
 
         // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
         // but int and float versions of operations require different opcodes.
```
