-rw-r--r--   src/Sema.zig                      10
-rw-r--r--   src/codegen/llvm.zig               6
-rw-r--r--   src/link/Dwarf.zig                54
-rw-r--r--   src/type.zig                     113
-rw-r--r--   test/behavior/eval.zig            33
-rw-r--r--   test/behavior/packed-struct.zig   56
6 files changed, 153 insertions, 119 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index c0e8b80dbd..048a702e7b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -11747,11 +11747,11 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const unresolved_operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
- const operand_ty = try sema.resolveTypeFields(block, operand_src, unresolved_operand_ty);
+ const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
const target = sema.mod.getTarget();
- const bit_size = operand_ty.bitSize(target);
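+ // The sema kit lets bitSizeAdvanced resolve the operand's field types on
+ // demand instead of requiring them to be resolved up front.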
+ const bit_size = try operand_ty.bitSizeAdvanced(target, sema.kit(block, src));
return sema.addIntUnsigned(Type.comptime_int, bit_size);
}
@@ -25047,9 +25047,7 @@ pub fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Typ
}
pub fn typeHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
- if ((try sema.typeHasOnePossibleValue(block, src, ty)) != null) return false;
- if (try sema.typeRequiresComptime(block, src, ty)) return false;
- return true;
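+ // Delegate to the Advanced variant, which can resolve types through the sema kit when needed.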
+ return ty.hasRuntimeBitsAdvanced(false, sema.kit(block, src));
}
fn typeAbiSize(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !u64 {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 60803eff69..19a6917be4 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -9185,7 +9185,7 @@ fn isByRef(ty: Type) bool {
.AnyFrame,
=> return false,
- .Array, .Frame => return ty.hasRuntimeBitsIgnoreComptime(),
+ .Array, .Frame => return ty.hasRuntimeBits(),
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout() == .Packed) return false;
@@ -9204,7 +9204,7 @@ fn isByRef(ty: Type) bool {
var count: usize = 0;
const fields = ty.structFields();
for (fields.values()) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
count += 1;
if (count > max_fields_byval) return true;
@@ -9212,7 +9212,7 @@ fn isByRef(ty: Type) bool {
}
return false;
},
- .Union => return ty.hasRuntimeBitsIgnoreComptime(),
+ .Union => return ty.hasRuntimeBits(),
.ErrorUnion => return isByRef(ty.errorUnionPayload()),
.Optional => {
var buf: Type.Payload.ElemType = undefined;
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index f291dd4255..efbd86bc7f 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -352,6 +352,7 @@ pub const DeclState = struct {
const fields = ty.structFields();
for (fields.keys()) |field_name, field_index| {
const field = fields.get(field_name).?;
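+ // Zero-bit fields have no runtime representation, so emit no DW.AT.member for them.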
+ if (!field.ty.hasRuntimeBits()) continue;
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -1037,6 +1038,7 @@ pub fn commitDeclState(
}
}
+ log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
try self.updateDeclDebugInfoAllocation(file, atom, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
@@ -1098,6 +1100,7 @@ pub fn commitDeclState(
}
}
+ log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
try self.writeDeclDebugInfo(file, atom, dbg_info_buffer.items);
}
@@ -1141,7 +1144,10 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, file: *File, atom: *Atom, len: u3
},
.wasm => {
const wasm_file = file.cast(File.Wasm).?;
- writeDbgInfoNopsBuffered(wasm_file.debug_info.items, atom.off, 0, &.{0}, atom.len, false);
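+ // Locate the atom inside the wasm debug_info buffer and write the
+ // placeholder bytes there, growing the buffer if it is too small.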
+ const segment_index = try wasm_file.getDebugInfoIndex();
+ const segment = &wasm_file.segments.items[segment_index];
+ const offset = segment.offset + atom.off;
+ try writeDbgInfoNopsToArrayList(gpa, &wasm_file.debug_info, offset, 0, &.{0}, atom.len, false);
},
else => unreachable,
}
@@ -1283,8 +1289,12 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
debug_info.items.len = needed_size;
}
const offset = segment.offset + atom.off;
- writeDbgInfoNopsBuffered(
- debug_info.items,
+ log.debug(" writeDbgInfoNopsToArrayList debug_info_len={d} offset={d} content_len={d} next_padding_size={d}", .{
+ debug_info.items.len, offset, dbg_info_buf.len, next_padding_size,
+ });
+ try writeDbgInfoNopsToArrayList(
+ gpa,
+ debug_info,
offset,
prev_padding_size,
dbg_info_buf,
@@ -1678,7 +1688,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
},
.wasm => {
const wasm_file = file.cast(File.Wasm).?;
- writeDbgInfoNopsBuffered(wasm_file.debug_info.items, 0, 0, di_buf.items, jmp_amt, false);
+ try writeDbgInfoNopsToArrayList(self.allocator, &wasm_file.debug_info, 0, 0, di_buf.items, jmp_amt, false);
},
else => unreachable,
}
@@ -1884,35 +1894,25 @@ fn pwriteDbgInfoNops(
try file.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
}
-fn writeDbgInfoNopsBuffered(
- buf: []u8,
+fn writeDbgInfoNopsToArrayList(
+ gpa: Allocator,
+ buffer: *std.ArrayListUnmanaged(u8),
offset: u32,
prev_padding_size: usize,
content: []const u8,
next_padding_size: usize,
trailing_zero: bool,
-) void {
- assert(buf.len >= content.len + prev_padding_size + next_padding_size + @boolToInt(trailing_zero));
- const tracy = trace(@src());
- defer tracy.end();
-
- {
- var padding_left = prev_padding_size;
- while (padding_left > 0) : (padding_left -= 1) {
- buf[offset - padding_left] = @enumToInt(AbbrevKind.pad1);
- }
- }
-
- mem.copy(u8, buf[offset..], content);
- {
- var padding_left = next_padding_size;
- while (padding_left > 0) : (padding_left -= 1) {
- buf[offset + content.len + padding_left] = @enumToInt(AbbrevKind.pad1);
- }
- }
+) Allocator.Error!void {
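+ // Grow the buffer (never shrink it) so the content, padding, and optional
+ // trailing zero byte all fit before it is indexed below.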
+ try buffer.resize(gpa, @maximum(
+ buffer.items.len,
+ offset + content.len + next_padding_size + 1,
+ ));
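+ // Write pad1 abbrev codes into the padding on both sides of the new content.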
+ mem.set(u8, buffer.items[offset - prev_padding_size .. offset], @enumToInt(AbbrevKind.pad1));
+ mem.copy(u8, buffer.items[offset..], content);
+ mem.set(u8, buffer.items[offset + content.len ..][0..next_padding_size], @enumToInt(AbbrevKind.pad1));
if (trailing_zero) {
- buf[offset + content.len + next_padding_size] = 0;
+ buffer.items[offset + content.len + next_padding_size] = 0;
}
}
@@ -2249,7 +2249,9 @@ pub fn flushModule(self: *Dwarf, file: *File, module: *Module) !void {
try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
try self.managed_atoms.append(gpa, atom);
+ log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
try self.updateDeclDebugInfoAllocation(file, atom, @intCast(u32, dbg_info_buffer.items.len));
+ log.debug("writeDeclDebugInfo in flushModule", .{});
try self.writeDeclDebugInfo(file, atom, dbg_info_buffer.items);
const file_pos = blk: {
diff --git a/src/type.zig b/src/type.zig
index 9b91572427..8556526e18 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2365,6 +2365,7 @@ pub const Type = extern union {
.@"anyframe",
.anyopaque,
.@"opaque",
+ .type_info,
=> return true,
// These are false because they are comptime-only types.
@@ -2379,7 +2380,6 @@ pub const Type = extern union {
.enum_literal,
.empty_struct,
.empty_struct_literal,
- .type_info,
.bound_fn,
// These are function *bodies*, not pointers.
// Special exceptions have to be made when emitting functions due to
@@ -2464,14 +2464,6 @@ pub const Type = extern union {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
- if (sema_kit) |sk| {
- _ = try sk.sema.typeRequiresComptime(sk.block, sk.src, ty);
- }
- switch (struct_obj.requires_comptime) {
- .yes => return false,
- .wip, .no => if (struct_obj.known_non_opv) return true,
- .unknown => {},
- }
if (struct_obj.status == .field_types_wip) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
@@ -3550,9 +3542,19 @@ pub const Type = extern union {
);
}
- /// Asserts the type has the bit size already resolved.
pub fn bitSize(ty: Type, target: Target) u64 {
- return switch (ty.tag()) {
+ return bitSizeAdvanced(ty, target, null) catch unreachable;
+ }
+
+ /// If you pass `sema_kit`, any recursive type resolutions will happen as
+ /// needed, possibly returning a CompileError. Passing `null` instead asserts
+ /// that the type is fully resolved, in which case no error can be returned.
+ pub fn bitSizeAdvanced(
+ ty: Type,
+ target: Target,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!u64 {
+ switch (ty.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
.fn_void_no_args => unreachable, // represents machine code; not a pointer
.fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
@@ -3576,40 +3578,30 @@ pub const Type = extern union {
.generic_poison => unreachable,
.bound_fn => unreachable,
- .void => 0,
- .bool, .u1 => 1,
- .u8, .i8 => 8,
- .i16, .u16, .f16 => 16,
- .u29 => 29,
- .i32, .u32, .f32 => 32,
- .i64, .u64, .f64 => 64,
- .f80 => 80,
- .u128, .i128, .f128 => 128,
+ .void => return 0,
+ .bool, .u1 => return 1,
+ .u8, .i8 => return 8,
+ .i16, .u16, .f16 => return 16,
+ .u29 => return 29,
+ .i32, .u32, .f32 => return 32,
+ .i64, .u64, .f64 => return 64,
+ .f80 => return 80,
+ .u128, .i128, .f128 => return 128,
.@"struct" => {
- const field_count = ty.structFieldCount();
- if (field_count == 0) return 0;
-
- const struct_obj = ty.castTag(.@"struct").?.data;
- assert(struct_obj.haveFieldTypes());
-
- switch (struct_obj.layout) {
- .Auto, .Extern => {
- var total: u64 = 0;
- for (struct_obj.fields.values()) |field| {
- total += field.ty.bitSize(target);
- }
- return total;
- },
- .Packed => return struct_obj.packedIntegerBits(target),
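+ // Resolve the field types first when a sema kit is available; with this change
+ // every layout, including packed, reports the sum of its field bit sizes.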
+ if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
+ var total: u64 = 0;
+ for (ty.structFields().values()) |field| {
+ total += try bitSizeAdvanced(field.ty, target, sema_kit);
}
+ return total;
},
.tuple, .anon_struct => {
- const tuple = ty.tupleFields();
+ if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
var total: u64 = 0;
- for (tuple.types) |field_ty| {
- total += field_ty.bitSize(target);
+ for (ty.tupleFields().types) |field_ty| {
+ total += try bitSizeAdvanced(field_ty, target, sema_kit);
}
return total;
},
@@ -3617,37 +3609,35 @@ pub const Type = extern union {
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&buffer);
- return int_tag_ty.bitSize(target);
+ return try bitSizeAdvanced(int_tag_ty, target, sema_kit);
},
.@"union", .union_tagged => {
+ if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
const union_obj = ty.cast(Payload.Union).?.data;
-
- const fields = union_obj.fields;
- if (fields.count() == 0) return 0;
-
assert(union_obj.haveFieldTypes());
var size: u64 = 0;
- for (fields.values()) |field| {
- size = @maximum(size, field.ty.bitSize(target));
+ for (union_obj.fields.values()) |field| {
+ size = @maximum(size, try bitSizeAdvanced(field.ty, target, sema_kit));
}
return size;
},
.vector => {
const payload = ty.castTag(.vector).?.data;
- const elem_bit_size = payload.elem_type.bitSize(target);
+ const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, sema_kit);
return elem_bit_size * payload.len;
},
- .array_u8 => 8 * ty.castTag(.array_u8).?.data,
- .array_u8_sentinel_0 => 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1),
+ .array_u8 => return 8 * ty.castTag(.array_u8).?.data,
+ .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1),
.array => {
const payload = ty.castTag(.array).?.data;
const elem_size = std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target));
if (elem_size == 0 or payload.len == 0)
- return 0;
- return (payload.len - 1) * 8 * elem_size + payload.elem_type.bitSize(target);
+ return @as(u64, 0);
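+ // Every element but the last occupies its full stride (the larger of ABI
+ // alignment and ABI size); only the final element contributes just its bit size.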
+ const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, sema_kit);
+ return (payload.len - 1) * 8 * elem_size + elem_bit_size;
},
.array_sentinel => {
const payload = ty.castTag(.array_sentinel).?.data;
@@ -3655,14 +3645,15 @@ pub const Type = extern union {
payload.elem_type.abiAlignment(target),
payload.elem_type.abiSize(target),
);
- return payload.len * 8 * elem_size + payload.elem_type.bitSize(target);
+ const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, sema_kit);
+ return payload.len * 8 * elem_size + elem_bit_size;
},
.isize,
.usize,
.@"anyframe",
.anyframe_T,
- => target.cpu.arch.ptrBitWidth(),
+ => return target.cpu.arch.ptrBitWidth(),
.const_slice,
.mut_slice,
@@ -3670,7 +3661,7 @@ pub const Type = extern union {
.const_slice_u8,
.const_slice_u8_sentinel_0,
- => target.cpu.arch.ptrBitWidth() * 2,
+ => return target.cpu.arch.ptrBitWidth() * 2,
.optional_single_const_pointer,
.optional_single_mut_pointer,
@@ -3689,8 +3680,8 @@ pub const Type = extern union {
},
.pointer => switch (ty.castTag(.pointer).?.data.size) {
- .Slice => target.cpu.arch.ptrBitWidth() * 2,
- else => target.cpu.arch.ptrBitWidth(),
+ .Slice => return target.cpu.arch.ptrBitWidth() * 2,
+ else => return target.cpu.arch.ptrBitWidth(),
},
.manyptr_u8,
@@ -3716,7 +3707,7 @@ pub const Type = extern union {
.error_set_merged,
=> return 16, // TODO revisit this when we have the concept of the error tag type
- .int_signed, .int_unsigned => ty.cast(Payload.Bits).?.data,
+ .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data,
.optional => {
var buf: Payload.ElemType = undefined;
@@ -3730,7 +3721,8 @@ pub const Type = extern union {
// field and a boolean as the second. Since the child type's abi alignment is
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
- return child_type.bitSize(target) + 1;
+ const child_bit_size = try bitSizeAdvanced(child_type, target, sema_kit);
+ return child_bit_size + 1;
},
.error_union => {
@@ -3738,9 +3730,9 @@ pub const Type = extern union {
if (!payload.error_set.hasRuntimeBits() and !payload.payload.hasRuntimeBits()) {
return 0;
} else if (!payload.error_set.hasRuntimeBits()) {
- return payload.payload.bitSize(target);
+ return payload.payload.bitSizeAdvanced(target, sema_kit);
} else if (!payload.payload.hasRuntimeBits()) {
- return payload.error_set.bitSize(target);
+ return payload.error_set.bitSizeAdvanced(target, sema_kit);
}
@panic("TODO bitSize error union");
},
@@ -3757,7 +3749,7 @@ pub const Type = extern union {
.extern_options,
.type_info,
=> @panic("TODO at some point we gotta resolve builtin types"),
- };
+ }
}
pub fn isSinglePointer(self: Type) bool {
@@ -5514,6 +5506,7 @@ pub const Type = extern union {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
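+ // Counting fields only makes sense once the field types have been resolved.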
+ assert(struct_obj.haveFieldTypes());
return struct_obj.fields.count();
},
.empty_struct, .empty_struct_literal => return 0,
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 220768c820..e56ea0cad5 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -1196,7 +1196,9 @@ test "equality of pointers to comptime const" {
}
test "storing an array of type in a field" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() void {
@@ -1221,3 +1223,32 @@ test "storing an array of type in a field" {
S.doTheTest();
}
+
+test "pass pointer to field of comptime-only type as a runtime parameter" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Mixed = struct {
+ T: type,
+ x: i32,
+ };
+ const bag: Mixed = .{
+ .T = bool,
+ .x = 1234,
+ };
+
+ var ok = false;
+
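+ // Mixed is comptime-only because it contains a type field, yet bag.x is an
+ // ordinary i32, so its address must be usable as a runtime argument.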
+ fn doTheTest() !void {
+ foo(&bag.x);
+ try expect(ok);
+ }
+
+ fn foo(ptr: *const i32) void {
+ ok = ptr.* == 1234;
+ }
+ };
+ try S.doTheTest();
+}
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 2483bbea69..ab5adc554a 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -33,10 +33,10 @@ test "correct size of packed structs" {
}
test "flags in packed structs" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const Flags1 = packed struct {
- // byte 0
+ // first 8 bits
b0_0: u1,
b0_1: u1,
b0_2: u1,
@@ -46,7 +46,7 @@ test "flags in packed structs" {
b0_6: u1,
b0_7: u1,
- // partial byte 1 (but not 8 bits)
+ // 7 more bits
b1_0: u1,
b1_1: u1,
b1_2: u1,
@@ -55,12 +55,12 @@ test "flags in packed structs" {
b1_5: u1,
b1_6: u1,
- // some padding to fill to size 3
+ // some padding to fill to 24 bits
_: u9,
};
- try expectEqual(3, @sizeOf(Flags1));
- try expectEqual(3 * 8, @bitSizeOf(Flags1));
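+ // The self-hosted compiler sizes a packed struct like its backing integer,
+ // so compare against @sizeOf(u24) rather than hard-coding 3 bytes.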
+ try expectEqual(@sizeOf(u24), @sizeOf(Flags1));
+ try expectEqual(24, @bitSizeOf(Flags1));
const Flags2 = packed struct {
// byte 0
@@ -86,8 +86,8 @@ test "flags in packed structs" {
_: u10,
};
- try expectEqual(4, @sizeOf(Flags2));
- try expectEqual(8 + 7 + 10, @bitSizeOf(Flags2));
+ try expectEqual(@sizeOf(u25), @sizeOf(Flags2));
+ try expectEqual(25, @bitSizeOf(Flags2));
const Flags3 = packed struct {
// byte 0
@@ -114,30 +114,30 @@ test "flags in packed structs" {
_: u16, // it works, if the padding is 8-based
};
- try expectEqual(4, @sizeOf(Flags3));
- try expectEqual(4 * 8, @bitSizeOf(Flags3));
+ try expectEqual(@sizeOf(u32), @sizeOf(Flags3));
+ try expectEqual(32, @bitSizeOf(Flags3));
}
test "arrays in packed structs" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const T1 = packed struct { array: [3][3]u8 };
const T2 = packed struct { array: [9]u8 };
- try expectEqual(9, @sizeOf(T1));
- try expectEqual(9 * 8, @bitSizeOf(T1));
- try expectEqual(9, @sizeOf(T2));
- try expectEqual(9 * 8, @bitSizeOf(T2));
+ try expectEqual(@sizeOf(u72), @sizeOf(T1));
+ try expectEqual(72, @bitSizeOf(T1));
+ try expectEqual(@sizeOf(u72), @sizeOf(T2));
+ try expectEqual(72, @bitSizeOf(T2));
}
test "consistent size of packed structs" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const TxData1 = packed struct { data: u8, _23: u23, full: bool = false };
const TxData2 = packed struct { data: u9, _22: u22, full: bool = false };
const register_size_bits = 32;
- const register_size_bytes = register_size_bits / 8;
+ const register_size_bytes = @sizeOf(u32);
try expectEqual(register_size_bits, @bitSizeOf(TxData1));
try expectEqual(register_size_bytes, @sizeOf(TxData1));
@@ -151,7 +151,7 @@ test "consistent size of packed structs" {
const TxData6 = packed struct { a: u24, b: u32 };
const expectedBitSize = 56;
- const expectedByteSize = expectedBitSize / 8;
+ const expectedByteSize = @sizeOf(u56);
try expectEqual(expectedBitSize, @bitSizeOf(TxData3));
try expectEqual(expectedByteSize, @sizeOf(TxData3));
@@ -167,7 +167,12 @@ test "consistent size of packed structs" {
}
test "correct sizeOf and offsets in packed structs" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const PStruct = packed struct {
bool_a: bool,
@@ -234,11 +239,16 @@ test "correct sizeOf and offsets in packed structs" {
try expectEqual(16, @offsetOf(S, "b"));
try expectEqual(128, @bitOffsetOf(S, "b"));
- try expectEqual(20, @sizeOf(S));
+ try expectEqual(@sizeOf(u160), @sizeOf(S));
}
test "nested packed structs" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S1 = packed struct { a: u8, b: u8, c: u8 };
@@ -248,7 +258,7 @@ test "nested packed structs" {
const S3Padded = packed struct { s3: S3, pad: u16 };
try expectEqual(48, @bitSizeOf(S3));
- try expectEqual(6, @sizeOf(S3));
+ try expectEqual(@sizeOf(u48), @sizeOf(S3));
try expectEqual(3, @offsetOf(S3, "y"));
try expectEqual(24, @bitOffsetOf(S3, "y"));
@@ -268,7 +278,7 @@ test "nested packed structs" {
const S6 = packed struct { a: i32, b: S4, c: i8 };
const expectedBitSize = 80;
- const expectedByteSize = expectedBitSize / 8;
+ const expectedByteSize = @sizeOf(u80);
try expectEqual(expectedBitSize, @bitSizeOf(S5));
try expectEqual(expectedByteSize, @sizeOf(S5));
try expectEqual(expectedBitSize, @bitSizeOf(S6));