From ddbdb83c865b1124487b3a00747fc5c1a67e5770 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Fri, 21 Oct 2022 18:56:18 -0700 Subject: stage 2: Update C types' size/alignment These updates were made by testing against the `sizeof/_Alignof` reported by Clang for all supported arch-OS-ABI combinations and correcting any discrepancies. This is bound to have a few errors (the recent long double fix for i386 Android is one example), but Clang is certainly not a bad place to start, especially for our most popular targets. --- src/type.zig | 363 +++++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 276 insertions(+), 87 deletions(-) (limited to 'src') diff --git a/src/type.zig b/src/type.zig index a2f0bb9e8f..bcb8969484 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2892,41 +2892,24 @@ pub const Type = extern union { .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, - .c_short => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) }, - .c_ushort => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) }, - .c_int => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) }, - .c_uint => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) }, - .c_long => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) }, - .c_ulong => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) }, - .c_longlong => switch (target.cpu.arch) { - .i386 => switch (target.os.tag) { - .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 }, - else => return AbiAlignmentAdvanced{ .scalar = 4 }, - }, - else => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) }, - }, - .c_ulonglong => switch (target.cpu.arch) { - .i386 => switch (target.os.tag) { - .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 }, - else => return AbiAlignmentAdvanced{ .scalar = 4 }, - }, - else => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) }, - }, + .c_short => return AbiAlignmentAdvanced{ .scalar = CType.short.alignment(target) }, + .c_ushort => return AbiAlignmentAdvanced{ .scalar = CType.ushort.alignment(target) }, + .c_int => return AbiAlignmentAdvanced{ .scalar = CType.int.alignment(target) }, + .c_uint => return AbiAlignmentAdvanced{ .scalar = CType.uint.alignment(target) }, + .c_long => return AbiAlignmentAdvanced{ .scalar = CType.long.alignment(target) }, + .c_ulong => return AbiAlignmentAdvanced{ .scalar = CType.ulong.alignment(target) }, + .c_longlong => return AbiAlignmentAdvanced{ .scalar = CType.longlong.alignment(target) }, + .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = CType.ulonglong.alignment(target) }, + .c_longdouble => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) }, .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, - .f32 => return AbiAlignmentAdvanced{ .scalar = 4 }, - .f64 => switch (target.cpu.arch) { - .i386 => switch (target.os.tag) { - .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 }, - else => return AbiAlignmentAdvanced{ .scalar = 4 }, - }, + .f32 => return AbiAlignmentAdvanced{ .scalar = CType.float.alignment(target) }, + .f64 => switch (CType.double.sizeInBits(target)) { + 64 => return AbiAlignmentAdvanced{ .scalar = CType.double.alignment(target) }, else => return AbiAlignmentAdvanced{ .scalar = 8 }, }, - .f128 => return 
AbiAlignmentAdvanced{ .scalar = 16 }, - - .f80 => switch (target.cpu.arch) { - .i386 => return AbiAlignmentAdvanced{ .scalar = 4 }, - .x86_64 => return AbiAlignmentAdvanced{ .scalar = 16 }, + .f80 => switch (CType.longdouble.sizeInBits(target)) { + 80 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) }, else => { var payload: Payload.Bits = .{ .base = .{ .tag = .int_unsigned }, @@ -2936,17 +2919,7 @@ pub const Type = extern union { return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) }; }, }, - .c_longdouble => switch (CType.longdouble.sizeInBits(target)) { - 16 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f16, target) }, - 32 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f32, target) }, - 64 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f64, target) }, - 80 => if (target.cpu.arch == .i386 and target.isMinGW()) - return AbiAlignmentAdvanced{ .scalar = 4 } - else - return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f80, target) }, - 128 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f128, target) }, - else => unreachable, - }, + .f128 => return AbiAlignmentAdvanced{ .scalar = 16 }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, @@ -3411,16 +3384,8 @@ pub const Type = extern union { .f32 => return AbiSizeAdvanced{ .scalar = 4 }, .f64 => return AbiSizeAdvanced{ .scalar = 8 }, .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - - .f80 => switch (target.cpu.arch) { - .i386 => switch (target.os.tag) { - .windows => switch (target.abi) { - .msvc => return AbiSizeAdvanced{ .scalar = 16 }, - else => return AbiSizeAdvanced{ .scalar = 12 }, - }, - else => return AbiSizeAdvanced{ .scalar = 12 }, - }, - .x86_64 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (CType.longdouble.sizeInBits(target)) { + 80 => return AbiSizeAdvanced{ .scalar = std.mem.alignForward(10, CType.longdouble.alignment(target)) }, else => { var payload: Payload.Bits = .{ .base = .{ .tag = .int_unsigned }, @@ -6654,45 +6619,80 @@ pub const CType = enum { ulonglong, longdouble, + // We don't have a `c_float`/`c_double` type in Zig, but these + // are useful for querying target-correct alignment and checking + // whether C's double is f64 or f32 + float, + double, + pub fn sizeInBits(self: CType, target: Target) u16 { switch (target.os.tag) { .freestanding, .other => switch (target.cpu.arch) { .msp430 => switch (self) { .short, .ushort, .int, .uint => return 16, - .long, .ulong => return 32, - .longlong, .ulonglong, .longdouble => return 64, + .float, .long, .ulong => return 32, + .longlong, .ulonglong, .double, .longdouble => return 64, }, .avr => switch (self) { .short, .ushort, .int, .uint => return 16, - .long, .ulong, .longdouble => return 32, + .long, .ulong, .float, .double, .longdouble => return 32, .longlong, .ulonglong => return 64, }, + .tce, .tcele => switch (self) { + .short, .ushort => return 16, + .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32, + .float, .double, .longdouble => return 32, + }, + .mips64, .mips64el => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32, + .longlong, .ulonglong, .double => return 64, + .longdouble => return 128, + }, + .x86_64 => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => switch (target.abi) { + .gnux32, .muslx32 => return 32, + else => return 64, + }, + 
.longlong, .ulonglong, .double => return 64, + .longdouble => return 80, + }, else => switch (self) { .short, .ushort => return 16, - .int, .uint => return 32, + .int, .uint, .float => return 32, .long, .ulong => return target.cpu.arch.ptrBitWidth(), - .longlong, .ulonglong => return 64, + .longlong, .ulonglong, .double => return 64, .longdouble => switch (target.cpu.arch) { .i386 => switch (target.abi) { .android => return 64, else => return 80, }, - .x86_64 => return 80, + .powerpc, + .powerpcle, + .powerpc64, + .powerpc64le, + => switch (target.abi) { + .musl, + .musleabi, + .musleabihf, + .muslx32, + => return 64, + else => return 128, + }, + + .riscv32, .riscv64, .aarch64, .aarch64_be, .aarch64_32, .s390x, - .mips64, - .mips64el, .sparc, .sparc64, .sparcel, - .powerpc, - .powerpcle, - .powerpc64, - .powerpc64le, .wasm32, .wasm64, => return 128, @@ -6716,23 +6716,78 @@ pub const CType = enum { .fuchsia, .minix, => switch (target.cpu.arch) { + .msp430 => switch (self) { + .short, .ushort, .int, .uint => return 16, + .long, .ulong, .float => return 32, + .longlong, .ulonglong, .double, .longdouble => return 64, + }, .avr => switch (self) { .short, .ushort, .int, .uint => return 16, - .long, .ulong, .longdouble => return 32, + .long, .ulong, .float, .double, .longdouble => return 32, .longlong, .ulonglong => return 64, }, + .tce, .tcele => switch (self) { + .short, .ushort => return 16, + .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32, + .float, .double, .longdouble => return 32, + }, + .mips64, .mips64el => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32, + .longlong, .ulonglong, .double => return 64, + .longdouble => if (target.os.tag == .freebsd) return 64 else return 128, + }, + .x86_64 => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => switch (target.abi) { + .gnux32, .muslx32 => return 32, + else => return 64, + }, + .longlong, .ulonglong, .double => return 64, + .longdouble => return 80, + }, else => switch (self) { .short, .ushort => return 16, - .int, .uint => return 32, + .int, .uint, .float => return 32, .long, .ulong => return target.cpu.arch.ptrBitWidth(), - .longlong, .ulonglong => return 64, + .longlong, .ulonglong, .double => return 64, .longdouble => switch (target.cpu.arch) { .i386 => switch (target.abi) { .android => return 64, else => return 80, }, - .x86_64 => return 80, + .powerpc, + .powerpcle, + => switch (target.abi) { + .musl, + .musleabi, + .musleabihf, + .muslx32, + => return 64, + else => switch (target.os.tag) { + .freebsd, .netbsd, .openbsd => return 64, + else => return 128, + }, + }, + + .powerpc64, + .powerpc64le, + => switch (target.abi) { + .musl, + .musleabi, + .musleabihf, + .muslx32, + => return 64, + else => switch (target.os.tag) { + .freebsd, .openbsd => return 64, + else => return 128, + }, + }, + + .riscv32, .riscv64, .aarch64, .aarch64_be, @@ -6743,10 +6798,6 @@ pub const CType = enum { .sparc, .sparc64, .sparcel, - .powerpc, - .powerpcle, - .powerpc64, - .powerpc64le, .wasm32, .wasm64, => return 128, @@ -6756,37 +6807,65 @@ pub const CType = enum { }, }, - .windows, .uefi => switch (self) { - .short, .ushort => return 16, - .int, .uint, .long, .ulong => return 32, - .longlong, .ulonglong => return 64, - .longdouble => switch (target.cpu.arch) { - .i386 => switch (target.abi) { - .gnu => return 80, + .windows, .uefi => switch (target.cpu.arch) { + .i386 => switch (self) { + 
.short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => return 32, + .longlong, .ulonglong, .double => return 64, + .longdouble => switch (target.abi) { + .gnu, .gnuilp32, .cygnus => return 80, else => return 64, }, - .x86_64 => switch (target.abi) { - .gnu => return 80, + }, + .x86_64 => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => switch (target.abi) { + .cygnus => return 64, + else => return 32, + }, + .longlong, .ulonglong, .double => return 64, + .longdouble => switch (target.abi) { + .gnu, .gnuilp32, .cygnus => return 128, else => return 64, }, - else => return 64, + }, + else => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => return 32, + .longlong, .ulonglong, .double => return 64, + .longdouble => return 64, }, }, .macos, .ios, .tvos, .watchos => switch (self) { .short, .ushort => return 16, - .int, .uint => return 32, - .long, .ulong, .longlong, .ulonglong => return 64, + .int, .uint, .float => return 32, + .long, .ulong => switch (target.cpu.arch) { + .i386, .arm, .aarch64_32 => return 32, + .x86_64 => switch (target.abi) { + .gnux32, .muslx32 => return 32, + else => return 64, + }, + else => return 64, + }, + .longlong, .ulonglong, .double => return 64, .longdouble => switch (target.cpu.arch) { - .i386, .x86_64 => return 80, + .i386 => switch (target.abi) { + .android => return 64, + else => return 80, + }, + .x86_64 => return 80, else => return 64, }, }, .amdhsa, .amdpal => switch (self) { .short, .ushort => return 16, - .int, .uint => return 32, - .long, .ulong, .longlong, .ulonglong => return 64, + .int, .uint, .float => return 32, + .long, .ulong, .longlong, .ulonglong, .double => return 64, .longdouble => return 128, }, @@ -6814,4 +6893,114 @@ pub const CType = enum { => @panic("TODO specify the C integer and float type sizes for this OS"), } } + + pub fn alignment(self: CType, target: Target) u16 { + + // Overrides for unusual alignments + switch (target.cpu.arch) { + .avr => switch (self) { + .short, .ushort => return 2, + else => return 1, + }, + .i386 => switch (target.os.tag) { + .windows, .uefi => switch (self) { + .longlong, .ulonglong, .double => return 8, + .longdouble => switch (target.abi) { + .gnu, .gnuilp32, .cygnus => return 4, + else => return 8, + }, + else => {}, + }, + else => {}, + }, + else => {}, + } + + // Self-aligned, up to a maximum. 
+ return @min( + std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8), + switch (target.cpu.arch) { + .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) { + .netbsd => switch (target.abi) { + .gnueabi, + .gnueabihf, + .eabi, + .eabihf, + .android, + .musleabi, + .musleabihf, + => 8, + + else => @as(u16, 4), + }, + .ios, .tvos, .watchos => 4, + else => 8, + }, + + .msp430, + .avr, + => 2, + + .arc, + .csky, + .i386, + .xcore, + .dxil, + .loongarch32, + .tce, + .tcele, + .le32, + .amdil, + .hsail, + .spir, + .spirv32, + .kalimba, + .shave, + .renderscript32, + .ve, + .spu_2, + => 4, + + .aarch64_32, + .amdgcn, + .amdil64, + .bpfel, + .bpfeb, + .hexagon, + .hsail64, + .loongarch64, + .m68k, + .mips, + .mipsel, + .sparc, + .sparcel, + .sparc64, + .lanai, + .le64, + .nvptx, + .nvptx64, + .r600, + .s390x, + .spir64, + .spirv64, + .renderscript64, + => 8, + + .aarch64, + .aarch64_be, + .mips64, + .mips64el, + .powerpc, + .powerpcle, + .powerpc64, + .powerpc64le, + .riscv32, + .riscv64, + .x86_64, + .wasm32, + .wasm64, + => 16, + }, + ); + } }; -- cgit v1.2.3 From f0e66ac4d0e6347bf1b5a00b309fafb5da84191b Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Fri, 21 Oct 2022 19:20:58 -0700 Subject: std.Target: Remove `longDoubleIs` This function is redundant with CType.sizeInBits(), and until the previous commit they disagreed about the correct long double type for several targets. Although they're all synced up now, it's much simpler just to have a single source of truth. --- lib/std/target.zig | 86 ---------------------------------------------------- src/codegen/llvm.zig | 4 +-- 2 files changed, 2 insertions(+), 88 deletions(-) (limited to 'src') diff --git a/lib/std/target.zig b/lib/std/target.zig index 99a137b4b9..7121c1c3e2 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1780,92 +1780,6 @@ pub const Target = struct { }; } - pub inline fn longDoubleIs(target: Target, comptime F: type) bool { - switch (target.os.tag) { - .windows, .uefi => switch (target.abi) { - .gnu, .gnuilp32, .cygnus => switch (target.cpu.arch) { - .i386 => return F == f80, - .x86_64 => return F == f128, - else => return F == f64, - }, - else => return F == f64, - }, - else => {}, - } - - if (target.abi == .android and target.cpu.arch == .i386) - return F == f64; - - switch (target.cpu.arch) { - .aarch64, - .aarch64_be, - .aarch64_32, - => switch (target.os.tag) { - // According to Apple's official guide: - // > The long double type is a double precision IEEE754 binary floating-point type, - // > which makes it identical to the double type. This behavior contrasts to the - // > standard specification, in which a long double is a quad-precision, IEEE754 - // > binary, floating-point type. 
- // https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms - .ios, .macos, .watchos, .tvos => return F == f64, - .windows, .uefi => return F == f64, - else => return F == f128, - }, - - .i386 => return F == f80, - .x86_64 => return F == f80, - - .mips64, - .mips64el, - => switch (target.os.tag) { - .freebsd => return F == f64, - else => return F == f128, - }, - - .powerpc, - .powerpcle, - => switch (target.abi) { - .musl, - .musleabi, - .musleabihf, - .muslx32, - => return F == f64, - else => switch (target.os.tag) { - .freebsd, .netbsd, .openbsd => return F == f64, - else => return F == f128, - }, - }, - - .powerpc64, - .powerpc64le, - => switch (target.abi) { - .musl, - .musleabi, - .musleabihf, - .muslx32, - => return F == f64, - else => switch (target.os.tag) { - .freebsd, .openbsd => return F == f64, - else => return F == f128, - }, - }, - - .riscv32, - .riscv64, - .s390x, - .sparc, - .sparc64, - .sparcel, - .wasm32, - .wasm64, - => return F == f128, - - .avr, .tce, .tcele => return F == f32, - - else => return F == f64, - } - } - pub inline fn maxIntAlignment(target: Target) u16 { return switch (target.cpu.arch) { .avr => 1, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b0d1588007..68e969f9e7 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -10615,8 +10615,8 @@ fn backendSupportsF128(target: std.Target) bool { fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { return switch (scalar_ty.tag()) { .f16 => backendSupportsF16(target), - .f80 => target.longDoubleIs(f80) and backendSupportsF80(target), - .f128 => target.longDoubleIs(f128) and backendSupportsF128(target), + .f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target), + .f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target), else => true, }; } -- cgit v1.2.3 From 94945864b9d8ffa7b707432fb877ae42e383db68 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Fri, 21 Oct 2022 20:16:00 -0700 Subject: Type.zig: Add `nvcl`/`cuda` CType definitions --- src/type.zig | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/type.zig b/src/type.zig index bcb8969484..51b326e18e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -6862,6 +6862,18 @@ pub const CType = enum { }, }, + .nvcl, .cuda => switch (self) { + .short, .ushort => return 16, + .int, .uint, .float => return 32, + .long, .ulong => switch (target.cpu.arch) { + .nvptx => return 32, + .nvptx64 => return 64, + else => return 64, + }, + .longlong, .ulonglong, .double => return 64, + .longdouble => return 64, + }, + .amdhsa, .amdpal => switch (self) { .short, .ushort => return 16, .int, .uint, .float => return 32, @@ -6876,8 +6888,6 @@ pub const CType = enum { .rtems, .nacl, .aix, - .cuda, - .nvcl, .ps4, .ps5, .elfiamcu, -- cgit v1.2.3 From c96f85852ed2e1d5b2ecb43770a3c41d7f38f284 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Sat, 22 Oct 2022 17:25:02 -0700 Subject: CType: Add `preferredAlignment` This value corresponds to clang/gcc's `__alignof` (rather than `_Alignof` which reports the minimum alignment). We don't use this information yet, but it might be useful for implementing ABIs so it is included here. 
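As a rough illustration (a hypothetical usage sketch, not part of this change — it assumes code living inside the compiler tree where `CType` from src/type.zig is importable and `@import("builtin").target` supplies the resolved target):

    const std = @import("std");
    const CType = @import("type.zig").CType;

    // Print both alignment flavours of C's `long double` for the host target:
    // `alignment` mirrors C11 `_Alignof` (minimum ABI alignment), while
    // `preferredAlignment` mirrors clang/gcc's `__alignof`.
    pub fn main() void {
        const target = @import("builtin").target;
        std.debug.print("_Alignof(long double)  -> {d}\n", .{CType.longdouble.alignment(target)});
        std.debug.print("__alignof(long double) -> {d}\n", .{CType.longdouble.preferredAlignment(target)});
    }

On i386-linux, for example, the two queries disagree for `double` (ABI alignment 4, preferred alignment 8), which is exactly the distinction this function captures.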
--- src/type.zig | 128 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 127 insertions(+), 1 deletion(-) (limited to 'src')

diff --git a/src/type.zig b/src/type.zig
index 51b326e18e..d78e2909b5 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -6926,7 +6926,7 @@ pub const CType = enum {
 else => {},
 }

- // Self-aligned, up to a maximum.
+ // Next-power-of-two-aligned, up to a maximum.
 return @min(
 std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
 switch (target.cpu.arch) {
@@ -7013,4 +7013,130 @@ pub const CType = enum {
 },
 );
 }
+
+ pub fn preferredAlignment(self: CType, target: Target) u16 {
+
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => {},
+
+ else => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ .ios, .tvos, .watchos => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ else => {},
+ },
+ .arc => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ .avr => switch (self) {
+ .int, .uint, .long, .ulong, .float, .longdouble => return 1,
+ .short, .ushort => return 2,
+ .double => return 4,
+ .longlong, .ulonglong => return 8,
+ },
+ .i386 => switch (target.os.tag) {
+ .windows, .uefi => switch (self) {
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
+ switch (target.cpu.arch) {
+ .msp430 => @as(u16, 2),
+
+ .csky,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .arc,
+ .arm,
+ .armeb,
+ .avr,
+ .thumb,
+ .thumbeb,
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .i386,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
 };
-- cgit v1.2.3


From 4ecc384f99e5f4c5a320714484866fb48699245f Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Sun, 23 Oct 2022 23:17:23 -0700
Subject: Fix long double on x86_64-windows

The larger alignment on this platform means that long double reports a sizeof 16 bytes, but its underlying size is really just the 10 bytes of `f80`.

C doesn't give us a way to see the "underlying" size of a type, so this has to be caught by hand or by monitoring runtime memory. Luckily, x86 and x86-64 are the only platforms that seem to use a non-power-of-two type like this.
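One quick way to observe this padding from Zig itself (a minimal sketch, assuming an x86_64 host):

    const std = @import("std");

    // On x86_64, `f80` carries 80 significant bits but is padded out to its
    // 16-byte ABI size, matching `sizeof(long double)` under the gnu ABI.
    pub fn main() void {
        std.debug.print("@bitSizeOf(f80) = {d}\n", .{@bitSizeOf(f80)}); // 80
        std.debug.print("@sizeOf(f80)    = {d}\n", .{@sizeOf(f80)}); // 16 on x86_64
    }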
--- src/type.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src')

diff --git a/src/type.zig b/src/type.zig
index d78e2909b5..8f2cd7c54d 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -6827,7 +6827,7 @@ pub const CType = enum {
 },
 .longlong, .ulonglong, .double => return 64,
 .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 128,
+ .gnu, .gnuilp32, .cygnus => return 80,
 else => return 64,
 },
 },
-- cgit v1.2.3


From 98285b17b3887de37b630da66f09a44f42ddbe01 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 25 Oct 2022 11:46:42 +0200
Subject: darwin: bump max macOS version to 13.0

---
 lib/std/target.zig | 4 ++--
 src/target.zig | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
(limited to 'src')

diff --git a/lib/std/target.zig b/lib/std/target.zig
index d791e3b035..7fbad5baa3 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -277,13 +277,13 @@ pub const Target = struct {
 .aarch64 => VersionRange{
 .semver = .{
 .min = .{ .major = 11, .minor = 6, .patch = 6 },
- .max = .{ .major = 12, .minor = 4 },
+ .max = .{ .major = 13, .minor = 0 },
 },
 },
 .x86_64 => VersionRange{
 .semver = .{
 .min = .{ .major = 10, .minor = 15, .patch = 7 },
- .max = .{ .major = 12, .minor = 4 },
+ .max = .{ .major = 13, .minor = 0 },
 },
 },
 else => unreachable,

diff --git a/src/target.zig b/src/target.zig
index 9e2d26dac6..fc585912c4 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -18,6 +18,7 @@ pub const available_libcs = [_]ArchOsAbi{
 .{ .arch = .aarch64, .os = .windows, .abi = .gnu },
 .{ .arch = .aarch64, .os = .macos, .abi = .none, .os_ver = .{ .major = 11, .minor = 0 } },
 .{ .arch = .aarch64, .os = .macos, .abi = .none, .os_ver = .{ .major = 12, .minor = 0 } },
+ .{ .arch = .aarch64, .os = .macos, .abi = .none, .os_ver = .{ .major = 13, .minor = 0 } },
 .{ .arch = .armeb, .os = .linux, .abi = .gnueabi },
 .{ .arch = .armeb, .os = .linux, .abi = .gnueabihf },
 .{ .arch = .armeb, .os = .linux, .abi = .musleabi },
@@ -73,6 +74,7 @@ pub const available_libcs = [_]ArchOsAbi{
 .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 10, .minor = 0 } },
 .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 11, .minor = 0 } },
 .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 12, .minor = 0 } },
+ .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 13, .minor = 0 } },
 };

 pub fn libCGenericName(target: std.Target) [:0]const u8 {
-- cgit v1.2.3


From 85b669d497641de383070353d50a6e4fd30abd49 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Wed, 19 Oct 2022 21:48:21 +0200
Subject: wasm-linker: validate feature compatibility

Verifies disallowed and used/required features. After verifying, all errors will be emitted to notify the user about incompatible features. When the user did not define any feature set, we infer the features from the linked objects instead.
---
 src/link/Wasm.zig | 107 ++++++++++++++++++++++++++++++++++++++++++++++++
 src/link/Wasm/types.zig | 24 +++++------
 2 files changed, 117 insertions(+), 14 deletions(-)
(limited to 'src')

diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 4c3de84e01..ed07756345 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -651,6 +651,112 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
 }
 }

+fn validateFeatures(wasm: *const Wasm, arena: Allocator) !void {
+ const cpu_features = wasm.base.options.target.cpu.features;
+ const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects.
+ var allowed = std.AutoHashMap(std.Target.wasm.Feature, void).init(arena); + var used = std.AutoArrayHashMap(std.Target.wasm.Feature, []const u8).init(arena); + var disallowed = std.AutoHashMap(std.Target.wasm.Feature, []const u8).init(arena); + var required = std.AutoHashMap(std.Target.wasm.Feature, []const u8).init(arena); + + // when false, we fail linking. We only verify this after a loop to catch all invalid features. + var valid_feature_set = true; + + // When the user has given an explicit list of features to enable, + // we extract them and insert each into the 'allowed' list. + if (!infer) { + try allowed.ensureUnusedCapacity(std.Target.wasm.all_features.len); + // std.builtin.Type.EnumField + inline for (@typeInfo(std.Target.wasm.Feature).Enum.fields) |feature_field| { + if (cpu_features.isEnabled(feature_field.value)) { + allowed.putAssumeCapacityNoClobber(@intToEnum(std.Target.wasm.Feature, feature_field.value), {}); + } + } + } + + // extract all the used, disallowed and required features from each + // linked object file so we can test them. + for (wasm.objects.items) |object| { + for (object.features) |feature| { + switch (feature.prefix) { + .used => { + const gop = try used.getOrPut(feature.tag); + if (!gop.found_existing) { + gop.value_ptr.* = object.name; + } + }, + .disallowed => { + const gop = try disallowed.getOrPut(feature.tag); + if (!gop.found_existing) { + gop.value_ptr.* = object.name; + } + }, + .required => { + const gop = try required.getOrPut(feature.tag); + if (!gop.found_existing) { + gop.value_ptr.* = object.name; + } + const used_gop = try used.getOrPut(feature.tag); + if (!used_gop.found_existing) { + used_gop.value_ptr.* = object.name; + } + }, + } + } + } + + // when we infer the features, we allow each feature found in the 'used' set + // and insert it into the 'allowed' set. When features are not inferred, + // we validate that a used feature is allowed. + if (infer) try allowed.ensureUnusedCapacity(@intCast(u32, used.count())); + for (used.keys()) |used_feature, used_index| { + if (infer) { + allowed.putAssumeCapacityNoClobber(used_feature, {}); + } else if (!allowed.contains(used_feature)) { + log.err("feature '{s}' not allowed, but used by linked object", .{@tagName(used_feature)}); + log.err(" defined in '{s}'", .{used.values()[used_index]}); + valid_feature_set = false; + } + } + + if (!valid_feature_set) { + return error.InvalidFeatureSet; + } + + // For each linked object, validate the required and disallowed features + for (wasm.objects.items) |object| { + var object_used_features = std.AutoHashMap(std.Target.wasm.Feature, void).init(arena); + try object_used_features.ensureTotalCapacity(@intCast(u32, object.features.len)); + for (object.features) |feature| { + if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set. 
+ // from here a feature is always used + if (disallowed.get(feature.tag)) |disallowed_object_name| { + log.err("feature '{s}' is disallowed, but used by linked object", .{@tagName(feature.tag)}); + log.err(" disallowed by '{s}'", .{disallowed_object_name}); + log.err(" used in '{s}'", .{object.name}); + valid_feature_set = false; + } + + object_used_features.putAssumeCapacity(feature.tag, {}); + } + + // validate the linked object file has each required feature + var required_it = required.iterator(); + while (required_it.next()) |required_feature| { + if (!object_used_features.contains(required_feature.key_ptr.*)) { + log.err("feature '{s}' is required but not used in linked object", .{@tagName(required_feature.key_ptr.*)}); + log.err(" required by '{s}'", .{required_feature.value_ptr.*}); + log.err(" missing in '{s}'", .{object.name}); + valid_feature_set = false; + } + } + } + + if (!valid_feature_set) { + return error.InvalidFeatureSet; + } +} + fn checkUndefinedSymbols(wasm: *const Wasm) !void { if (wasm.base.options.output_mode == .Obj) return; @@ -2158,6 +2264,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod try wasm.resolveSymbolsInObject(@intCast(u16, object_index)); } + try wasm.validateFeatures(arena); try wasm.resolveSymbolsInArchives(); try wasm.checkUndefinedSymbols(); diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig index 2006fe1812..5e071c2f20 100644 --- a/src/link/Wasm/types.zig +++ b/src/link/Wasm/types.zig @@ -183,18 +183,7 @@ pub const Feature = struct { /// Type of the feature, must be unique in the sequence of features. tag: Tag, - pub const Tag = enum { - atomics, - bulk_memory, - exception_handling, - multivalue, - mutable_globals, - nontrapping_fptoint, - sign_ext, - simd128, - tail_call, - shared_mem, - }; + pub const Tag = std.Target.wasm.Feature; pub const Prefix = enum(u8) { used = '+', @@ -204,13 +193,18 @@ pub const Feature = struct { pub fn toString(feature: Feature) []const u8 { return switch (feature.tag) { + .atomics => "atomics", .bulk_memory => "bulk-memory", .exception_handling => "exception-handling", + .extended_const => "extended-const", + .multivalue => "multivalue", .mutable_globals => "mutable-globals", .nontrapping_fptoint => "nontrapping-fptoint", + .reference_types => "reference-types", + .relaxed_simd => "relaxed-simd", .sign_ext => "sign-ext", + .simd128 => "simd128", .tail_call => "tail-call", - else => @tagName(feature), }; } @@ -225,11 +219,13 @@ pub const known_features = std.ComptimeStringMap(Feature.Tag, .{ .{ "atomics", .atomics }, .{ "bulk-memory", .bulk_memory }, .{ "exception-handling", .exception_handling }, + .{ "extended-const", .extended_const }, .{ "multivalue", .multivalue }, .{ "mutable-globals", .mutable_globals }, .{ "nontrapping-fptoint", .nontrapping_fptoint }, + .{ "reference-types", .reference_types }, + .{ "relaxed-simd", .relaxed_simd }, .{ "sign-ext", .sign_ext }, .{ "simd128", .simd128 }, .{ "tail-call", .tail_call }, - .{ "shared-mem", .shared_mem }, }); -- cgit v1.2.3 From 777bcbf96871a0250664b9cabdea5dbf51e0e64d Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Thu, 20 Oct 2022 16:58:00 +0200 Subject: wasm-linker: emit `target_features` section When the result is not being stripped, we emit the `target_features` section based on all the used features. This includes features inferred from linked object files. Considering we know all possible features upfront, we can use an array and therefore do not have to dynamically allocate memory. 
Using this trick we can also easily order all features based on the same ordering as found in `std.Target.wasm`, which is the same ordering used by LLVM and the like.
---
 src/link/Wasm.zig | 47 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 45 insertions(+), 2 deletions(-)
(limited to 'src')

diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index ed07756345..da9a878720 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -651,7 +651,12 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
 }
 }

-fn validateFeatures(wasm: *const Wasm, arena: Allocator) !void {
+fn validateFeatures(
+ wasm: *const Wasm,
+ arena: Allocator,
+ to_emit: *[@typeInfo(std.Target.wasm.Feature).Enum.fields.len]bool,
+ emit_features_count: *u32,
+) !void {
 const cpu_features = wasm.base.options.target.cpu.features;
 const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects.
 var allowed = std.AutoHashMap(std.Target.wasm.Feature, void).init(arena);
@@ -755,6 +760,13 @@ fn validateFeatures(wasm: *const Wasm, arena: Allocator) !void {
 if (!valid_feature_set) {
 return error.InvalidFeatureSet;
 }
+
+ if (allowed.count() > 0) {
+ emit_features_count.* = allowed.count();
+ for (to_emit) |*feature_enabled, feature_index| {
+ feature_enabled.* = allowed.contains(@intToEnum(std.Target.wasm.Feature, feature_index));
+ }
+ }
 }

 fn checkUndefinedSymbols(wasm: *const Wasm) !void {
@@ -2264,7 +2276,9 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
 try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
 }

- try wasm.validateFeatures(arena);
+ var emit_features_count: u32 = 0;
+ var enabled_features: [@typeInfo(std.Target.wasm.Feature).Enum.fields.len]bool = undefined;
+ try wasm.validateFeatures(arena, &enabled_features, &emit_features_count);
 try wasm.resolveSymbolsInArchives();
 try wasm.checkUndefinedSymbols();
@@ -2710,6 +2724,9 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
 }

 try emitProducerSection(&binary_bytes);
+ if (emit_features_count > 0) {
+ try emitFeaturesSection(&binary_bytes, &enabled_features, emit_features_count);
+ }
 }

 // Only when writing all sections executed properly we write the magic
@@ -2802,6 +2819,32 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
 );
 }

+fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []const bool, features_count: u32) !void {
+ const header_offset = try reserveCustomSectionHeader(binary_bytes);
+
+ const writer = binary_bytes.writer();
+ const target_features = "target_features";
+ try leb.writeULEB128(writer, @intCast(u32, target_features.len));
+ try writer.writeAll(target_features);
+
+ try leb.writeULEB128(writer, features_count);
+ for (enabled_features) |enabled, feature_index| {
+ if (enabled) {
+ const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) };
+ try leb.writeULEB128(writer, @enumToInt(feature.prefix));
+ const string = feature.toString();
+ try leb.writeULEB128(writer, @intCast(u32, string.len));
+ try writer.writeAll(string);
+ }
+ }
+
+ try writeCustomSectionHeader(
+ binary_bytes.items,
+ header_offset,
+ @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ );
+}
+
 fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem.Allocator) !void {
 const Name = struct {
 index: u32,
-- cgit v1.2.3


From 3d1d19f3877190db42544acf9e0ed26784ba82ba Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Sun, 23 Oct
2022 20:02:25 +0200
Subject: wasm-linker: separate linker and cpu features

The list of features a Wasm object/binary file can emit can differ from the list of cpu features. This is because the "target_features" section also contains linker features. An example of this is the "shared-mem" feature, which is a feature for the linker and not that of the cpu target as defined by LLVM.
---
 src/link/Wasm.zig | 18 +++++++++---------
 src/link/Wasm/types.zig | 24 +++++++++++++++++++++++-
 2 files changed, 32 insertions(+), 10 deletions(-)
(limited to 'src')

diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index da9a878720..4b6895c4e5 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -654,15 +654,15 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
 fn validateFeatures(
 wasm: *const Wasm,
 arena: Allocator,
- to_emit: *[@typeInfo(std.Target.wasm.Feature).Enum.fields.len]bool,
+ to_emit: *[@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
 emit_features_count: *u32,
 ) !void {
 const cpu_features = wasm.base.options.target.cpu.features;
 const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects.
- var allowed = std.AutoHashMap(std.Target.wasm.Feature, void).init(arena);
- var used = std.AutoArrayHashMap(std.Target.wasm.Feature, []const u8).init(arena);
- var disallowed = std.AutoHashMap(std.Target.wasm.Feature, []const u8).init(arena);
- var required = std.AutoHashMap(std.Target.wasm.Feature, []const u8).init(arena);
+ var allowed = std.AutoHashMap(types.Feature.Tag, void).init(arena);
+ var used = std.AutoArrayHashMap(types.Feature.Tag, []const u8).init(arena);
+ var disallowed = std.AutoHashMap(types.Feature.Tag, []const u8).init(arena);
+ var required = std.AutoHashMap(types.Feature.Tag, []const u8).init(arena);
@@ -674,7 +674,7 @@ fn validateFeatures(
 // std.builtin.Type.EnumField
 inline for (@typeInfo(std.Target.wasm.Feature).Enum.fields) |feature_field| {
 if (cpu_features.isEnabled(feature_field.value)) {
- allowed.putAssumeCapacityNoClobber(@intToEnum(std.Target.wasm.Feature, feature_field.value), {});
+ allowed.putAssumeCapacityNoClobber(@intToEnum(types.Feature.Tag, feature_field.value), {});
 }
 }
 }
@@ -730,7 +730,7 @@ fn validateFeatures(
 // For each linked object, validate the required and disallowed features
 for (wasm.objects.items) |object| {
- var object_used_features = std.AutoHashMap(std.Target.wasm.Feature, void).init(arena);
+ var object_used_features = std.AutoHashMap(types.Feature.Tag, void).init(arena);
 try object_used_features.ensureTotalCapacity(@intCast(u32, object.features.len));
 for (object.features) |feature| {
 if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set.
@@ -764,7 +764,7 @@ fn validateFeatures( if (allowed.count() > 0) { emit_features_count.* = allowed.count(); for (to_emit) |*feature_enabled, feature_index| { - feature_enabled.* = allowed.contains(@intToEnum(std.Target.wasm.Feature, feature_index)); + feature_enabled.* = allowed.contains(@intToEnum(types.Feature.Tag, feature_index)); } } } @@ -2277,7 +2277,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod } var emit_features_count: u32 = 0; - var enabled_features: [@typeInfo(std.Target.wasm.Feature).Enum.fields.len]bool = undefined; + var enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool = undefined; try wasm.validateFeatures(arena, &enabled_features, &emit_features_count); try wasm.resolveSymbolsInArchives(); try wasm.checkUndefinedSymbols(); diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig index 5e071c2f20..1b6df86d37 100644 --- a/src/link/Wasm/types.zig +++ b/src/link/Wasm/types.zig @@ -183,7 +183,27 @@ pub const Feature = struct { /// Type of the feature, must be unique in the sequence of features. tag: Tag, - pub const Tag = std.Target.wasm.Feature; + /// Unlike `std.Target.wasm.Feature` this also contains linker-features such as shared-mem + pub const Tag = enum { + atomics, + bulk_memory, + exception_handling, + extended_const, + multivalue, + mutable_globals, + nontrapping_fptoint, + reference_types, + relaxed_simd, + sign_ext, + simd128, + tail_call, + shared_mem, + + /// From a given cpu feature, returns its linker feature + pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag { + return @intToEnum(Tag, @enumToInt(feature)); + } + }; pub const Prefix = enum(u8) { used = '+', @@ -205,6 +225,7 @@ pub const Feature = struct { .sign_ext => "sign-ext", .simd128 => "simd128", .tail_call => "tail-call", + .shared_mem => "shared-mem", }; } @@ -228,4 +249,5 @@ pub const known_features = std.ComptimeStringMap(Feature.Tag, .{ .{ "sign-ext", .sign_ext }, .{ "simd128", .simd128 }, .{ "tail-call", .tail_call }, + .{ "shared-mem", .shared_mem }, }); -- cgit v1.2.3 From c0710b0c42716bb7173b9fcc2785f9bf5175ae0f Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Tue, 25 Oct 2022 21:16:51 +0200 Subject: use fixed-size arrays for feature lists Considering all possible features are known by the linker during compile-time, we can create arrays on the stack instead of dynamically allocating hash maps. We use a simple bitset to determine whether a feature is enabled or not, and from which object file it originates. This allows us to make feature validation slightly faster and use less runtime memory. In the future this could be enhanced further by having a single array instead with a more sophisticated bitset. 
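The packing scheme used below squeezes a presence bit and the originating object index into a single integer; a minimal sketch of the encoding (the object index here is made up for illustration):

    const std = @import("std");

    pub fn main() void {
        const object_index: u16 = 3;
        // Bit 0 records that the feature was seen; the upper 16 bits
        // record which object file it came from.
        const value: u17 = @as(u17, object_index) << 1 | 1;
        const is_enabled = @truncate(u1, value) != 0;
        std.debug.print("enabled={} object={d}\n", .{ is_enabled, value >> 1 });
    }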
--- src/link.zig | 1 + src/link/Wasm.zig | 85 ++++++++++++++++++++----------------------------- src/link/Wasm/types.zig | 38 +++++++++++----------- 3 files changed, 55 insertions(+), 69 deletions(-) (limited to 'src') diff --git a/src/link.zig b/src/link.zig index 9d4ac0d55b..39f51e90ec 100644 --- a/src/link.zig +++ b/src/link.zig @@ -696,6 +696,7 @@ pub const File = struct { GlobalTypeMismatch, InvalidCharacter, InvalidEntryKind, + InvalidFeatureSet, InvalidFormat, InvalidIndex, InvalidMagicByte, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 4b6895c4e5..b9f2d74bd8 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -653,16 +653,17 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void { fn validateFeatures( wasm: *const Wasm, - arena: Allocator, to_emit: *[@typeInfo(types.Feature.Tag).Enum.fields.len]bool, emit_features_count: *u32, ) !void { const cpu_features = wasm.base.options.target.cpu.features; const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects. - var allowed = std.AutoHashMap(types.Feature.Tag, void).init(arena); - var used = std.AutoArrayHashMap(types.Feature.Tag, []const u8).init(arena); - var disallowed = std.AutoHashMap(types.Feature.Tag, []const u8).init(arena); - var required = std.AutoHashMap(types.Feature.Tag, []const u8).init(arena); + const known_features_count = @typeInfo(types.Feature.Tag).Enum.fields.len; + + var allowed = [_]bool{false} ** known_features_count; + var used = [_]u17{0} ** known_features_count; + var disallowed = [_]u17{0} ** known_features_count; + var required = [_]u17{0} ** known_features_count; // when false, we fail linking. We only verify this after a loop to catch all invalid features. var valid_feature_set = true; @@ -670,41 +671,29 @@ fn validateFeatures( // When the user has given an explicit list of features to enable, // we extract them and insert each into the 'allowed' list. if (!infer) { - try allowed.ensureUnusedCapacity(std.Target.wasm.all_features.len); - // std.builtin.Type.EnumField inline for (@typeInfo(std.Target.wasm.Feature).Enum.fields) |feature_field| { if (cpu_features.isEnabled(feature_field.value)) { - allowed.putAssumeCapacityNoClobber(@intToEnum(types.Feature.Tag, feature_field.value), {}); + allowed[feature_field.value] = true; + emit_features_count.* += 1; } } } // extract all the used, disallowed and required features from each // linked object file so we can test them. - for (wasm.objects.items) |object| { + for (wasm.objects.items) |object, object_index| { for (object.features) |feature| { + const value = @intCast(u16, object_index) << 1 | @as(u1, 1); switch (feature.prefix) { .used => { - const gop = try used.getOrPut(feature.tag); - if (!gop.found_existing) { - gop.value_ptr.* = object.name; - } + used[@enumToInt(feature.tag)] = value; }, .disallowed => { - const gop = try disallowed.getOrPut(feature.tag); - if (!gop.found_existing) { - gop.value_ptr.* = object.name; - } + disallowed[@enumToInt(feature.tag)] = value; }, .required => { - const gop = try required.getOrPut(feature.tag); - if (!gop.found_existing) { - gop.value_ptr.* = object.name; - } - const used_gop = try used.getOrPut(feature.tag); - if (!used_gop.found_existing) { - used_gop.value_ptr.* = object.name; - } + required[@enumToInt(feature.tag)] = value; + used[@enumToInt(feature.tag)] = value; }, } } @@ -713,13 +702,14 @@ fn validateFeatures( // when we infer the features, we allow each feature found in the 'used' set // and insert it into the 'allowed' set. 
When features are not inferred, // we validate that a used feature is allowed. - if (infer) try allowed.ensureUnusedCapacity(@intCast(u32, used.count())); - for (used.keys()) |used_feature, used_index| { + for (used) |used_set, used_index| { + const is_enabled = @truncate(u1, used_set) != 0; if (infer) { - allowed.putAssumeCapacityNoClobber(used_feature, {}); - } else if (!allowed.contains(used_feature)) { - log.err("feature '{s}' not allowed, but used by linked object", .{@tagName(used_feature)}); - log.err(" defined in '{s}'", .{used.values()[used_index]}); + allowed[used_index] = is_enabled; + emit_features_count.* += @boolToInt(is_enabled); + } else if (is_enabled and !allowed[used_index]) { + log.err("feature '{s}' not allowed, but used by linked object", .{(@intToEnum(types.Feature.Tag, used_index)).toString()}); + log.err(" defined in '{s}'", .{wasm.objects.items[used_set >> 1].name}); valid_feature_set = false; } } @@ -730,27 +720,27 @@ fn validateFeatures( // For each linked object, validate the required and disallowed features for (wasm.objects.items) |object| { - var object_used_features = std.AutoHashMap(types.Feature.Tag, void).init(arena); - try object_used_features.ensureTotalCapacity(@intCast(u32, object.features.len)); + var object_used_features = [_]bool{false} ** known_features_count; for (object.features) |feature| { if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set. // from here a feature is always used - if (disallowed.get(feature.tag)) |disallowed_object_name| { - log.err("feature '{s}' is disallowed, but used by linked object", .{@tagName(feature.tag)}); - log.err(" disallowed by '{s}'", .{disallowed_object_name}); + const disallowed_feature = disallowed[@enumToInt(feature.tag)]; + if (@truncate(u1, disallowed_feature) != 0) { + log.err("feature '{s}' is disallowed, but used by linked object", .{feature.tag.toString()}); + log.err(" disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name}); log.err(" used in '{s}'", .{object.name}); valid_feature_set = false; } - object_used_features.putAssumeCapacity(feature.tag, {}); + object_used_features[@enumToInt(feature.tag)] = true; } // validate the linked object file has each required feature - var required_it = required.iterator(); - while (required_it.next()) |required_feature| { - if (!object_used_features.contains(required_feature.key_ptr.*)) { - log.err("feature '{s}' is required but not used in linked object", .{@tagName(required_feature.key_ptr.*)}); - log.err(" required by '{s}'", .{required_feature.value_ptr.*}); + for (required) |required_feature, feature_index| { + const is_required = @truncate(u1, required_feature) != 0; + if (is_required and !object_used_features[feature_index]) { + log.err("feature '{s}' is required but not used in linked object", .{(@intToEnum(types.Feature.Tag, feature_index)).toString()}); + log.err(" required by '{s}'", .{wasm.objects.items[required_feature >> 1].name}); log.err(" missing in '{s}'", .{object.name}); valid_feature_set = false; } @@ -761,12 +751,7 @@ fn validateFeatures( return error.InvalidFeatureSet; } - if (allowed.count() > 0) { - emit_features_count.* = allowed.count(); - for (to_emit) |*feature_enabled, feature_index| { - feature_enabled.* = allowed.contains(@intToEnum(types.Feature.Tag, feature_index)); - } - } + to_emit.* = allowed; } fn checkUndefinedSymbols(wasm: *const Wasm) !void { @@ -2278,7 +2263,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod var emit_features_count: u32 
= 0; var enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool = undefined; - try wasm.validateFeatures(arena, &enabled_features, &emit_features_count); + try wasm.validateFeatures(&enabled_features, &emit_features_count); try wasm.resolveSymbolsInArchives(); try wasm.checkUndefinedSymbols(); @@ -2832,7 +2817,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con if (enabled) { const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) }; try leb.writeULEB128(writer, @enumToInt(feature.prefix)); - const string = feature.toString(); + const string = feature.tag.toString(); try leb.writeULEB128(writer, @intCast(u32, string.len)); try writer.writeAll(string); } diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig index 1b6df86d37..a46fad4e53 100644 --- a/src/link/Wasm/types.zig +++ b/src/link/Wasm/types.zig @@ -203,6 +203,24 @@ pub const Feature = struct { pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag { return @intToEnum(Tag, @enumToInt(feature)); } + + pub fn toString(tag: Tag) []const u8 { + return switch (tag) { + .atomics => "atomics", + .bulk_memory => "bulk-memory", + .exception_handling => "exception-handling", + .extended_const => "extended-const", + .multivalue => "multivalue", + .mutable_globals => "mutable-globals", + .nontrapping_fptoint => "nontrapping-fptoint", + .reference_types => "reference-types", + .relaxed_simd => "relaxed-simd", + .sign_ext => "sign-ext", + .simd128 => "simd128", + .tail_call => "tail-call", + .shared_mem => "shared-mem", + }; + } }; pub const Prefix = enum(u8) { @@ -211,28 +229,10 @@ pub const Feature = struct { required = '=', }; - pub fn toString(feature: Feature) []const u8 { - return switch (feature.tag) { - .atomics => "atomics", - .bulk_memory => "bulk-memory", - .exception_handling => "exception-handling", - .extended_const => "extended-const", - .multivalue => "multivalue", - .mutable_globals => "mutable-globals", - .nontrapping_fptoint => "nontrapping-fptoint", - .reference_types => "reference-types", - .relaxed_simd => "relaxed-simd", - .sign_ext => "sign-ext", - .simd128 => "simd128", - .tail_call => "tail-call", - .shared_mem => "shared-mem", - }; - } - pub fn format(feature: Feature, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { _ = opt; _ = fmt; - try writer.print("{c} {s}", .{ feature.prefix, feature.toString() }); + try writer.print("{c} {s}", .{ feature.prefix, feature.tag.toString() }); } }; -- cgit v1.2.3 From 453a2d2fad5728ed0a7fe85dd90e26cc00b938d1 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 26 Oct 2022 08:53:33 +0200 Subject: darwin: remove 10.15.7 as available libc from src/target.zig --- src/target.zig | 1 - 1 file changed, 1 deletion(-) (limited to 'src') diff --git a/src/target.zig b/src/target.zig index fc585912c4..079d115161 100644 --- a/src/target.zig +++ b/src/target.zig @@ -71,7 +71,6 @@ pub const available_libcs = [_]ArchOsAbi{ .{ .arch = .x86_64, .os = .linux, .abi = .gnux32 }, .{ .arch = .x86_64, .os = .linux, .abi = .musl }, .{ .arch = .x86_64, .os = .windows, .abi = .gnu }, - .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 10, .minor = 0 } }, .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 11, .minor = 0 } }, .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 12, .minor = 0 } }, .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 13, .minor = 0 } }, -- cgit v1.2.3 From 
9db293492bbbc5b8d70638bd9c59dea19d13596c Mon Sep 17 00:00:00 2001 From: Hadrien Dorio Date: Sat, 20 Aug 2022 21:08:02 +0000 Subject: make a .rsp file for `zig clang` same as std.build.LibExeObjStep.make() for `zig build-exe` closes #12419 --- src/Compilation.zig | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++ test/standalone.zig | 5 +--- 2 files changed, 68 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/Compilation.zig b/src/Compilation.zig index fc71da56f3..5c3db25555 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -7,6 +7,9 @@ const Allocator = std.mem.Allocator; const assert = std.debug.assert; const log = std.log.scoped(.compilation); const Target = std.Target; +const ArrayList = std.ArrayList; +const Sha256 = std.crypto.hash.sha2.Sha256; +const fs = std.fs; const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; @@ -3915,6 +3918,70 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P } } + // Windows has an argument length limit of 32,766 characters, macOS 262,144 and Linux + // 2,097,152. If our args exceed 30 KiB, we instead write them to a "response file" and + // pass that to zig, e.g. via 'zig build-lib @args.rsp' + // See @file syntax here: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html + var args_length: usize = 0; + for (argv.items) |arg| { + args_length += arg.len + 1; // +1 to account for null terminator + } + if (args_length >= 30 * 1024) { + const allocator = comp.gpa; + const input_args = argv.items[2..]; + const output_dir = comp.local_cache_directory; + + var args_arena = std.heap.ArenaAllocator.init(allocator); + defer args_arena.deinit(); + + const args_to_escape = input_args; + var escaped_args = try ArrayList([]const u8).initCapacity(args_arena.allocator(), args_to_escape.len); + + arg_blk: for (args_to_escape) |arg| { + for (arg) |c, arg_idx| { + if (c == '\\' or c == '"') { + // Slow path for arguments that need to be escaped. We'll need to allocate and copy + var escaped = try ArrayList(u8).initCapacity(args_arena.allocator(), arg.len + 1); + const writer = escaped.writer(); + writer.writeAll(arg[0..arg_idx]) catch unreachable; + for (arg[arg_idx..]) |to_escape| { + if (to_escape == '\\' or to_escape == '"') try writer.writeByte('\\'); + try writer.writeByte(to_escape); + } + escaped_args.appendAssumeCapacity(escaped.items); + continue :arg_blk; + } + } + escaped_args.appendAssumeCapacity(arg); // no escaping needed so just use original argument + } + + const partially_quoted = try std.mem.join(allocator, "\" \"", escaped_args.items); + const args = try std.mem.concat(allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" }); + + // Write the args to zig-cache/args/ to avoid conflicts with + // other zig build commands running in parallel. 
+ + var args_hash: [Sha256.digest_length]u8 = undefined; + Sha256.hash(args, &args_hash, .{}); + var args_hex_hash: [Sha256.digest_length * 2]u8 = undefined; + _ = try std.fmt.bufPrint( + &args_hex_hash, + "{s}", + .{std.fmt.fmtSliceHexLower(&args_hash)}, + ); + + const args_dir = "args"; + try output_dir.handle.makePath(args_dir); + const args_file = try fs.path.join(allocator, &[_][]const u8{ + args_dir, args_hex_hash[0..], + }); + try output_dir.handle.writeFile(args_file, args); + const args_file_path = try output_dir.handle.realpathAlloc(allocator, args_file); + + argv.shrinkRetainingCapacity(2); + try argv.append(try std.mem.concat(allocator, u8, &[_][]const u8{ "@", args_file_path })); + } + if (comp.verbose_cc) { dump_argv(argv.items); } diff --git a/test/standalone.zig b/test/standalone.zig index f0567943b6..c945fe6bb6 100644 --- a/test/standalone.zig +++ b/test/standalone.zig @@ -37,10 +37,7 @@ pub fn addCases(cases: *tests.StandaloneContext) void { if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/12194 cases.addBuildFile("test/standalone/issue_9812/build.zig", .{}); } - if (builtin.os.tag != .windows) { - // https://github.com/ziglang/zig/issues/12419 - cases.addBuildFile("test/standalone/issue_11595/build.zig", .{}); - } + cases.addBuildFile("test/standalone/issue_11595/build.zig", .{}); if (builtin.os.tag != .wasi) { cases.addBuildFile("test/standalone/load_dynamic_library/build.zig", .{}); } -- cgit v1.2.3 From db799ae628cc3a131c34f8ae4b7422d7e9520cff Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 22:42:01 +0300 Subject: Module: mitigate generic deletion bug --- src/Module.zig | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/Module.zig b/src/Module.zig index 4f150b0148..9a395d72c4 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -3367,6 +3367,8 @@ pub fn deinit(mod: *Module) void { for (mod.import_table.keys()) |key| { gpa.free(key); } + var failed_decls = mod.failed_decls; + mod.failed_decls = .{}; for (mod.import_table.values()) |value| { value.destroy(mod); } @@ -3406,10 +3408,10 @@ pub fn deinit(mod: *Module) void { mod.local_zir_cache.handle.close(); mod.global_zir_cache.handle.close(); - for (mod.failed_decls.values()) |value| { + for (failed_decls.values()) |value| { value.destroy(gpa); } - mod.failed_decls.deinit(gpa); + failed_decls.deinit(gpa); if (mod.emit_h) |emit_h| { for (emit_h.failed_decls.values()) |value| { @@ -3482,6 +3484,13 @@ pub fn deinit(mod: *Module) void { pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; { + if (mod.failed_decls.contains(decl_index)) { + blk: { + const errs = mod.comp.getAllErrorsAlloc() catch break :blk; + for (errs.list) |err| Compilation.AllErrors.Message.renderToStdErr(err, .no_color); + } + @panic("Zig compiler bug: attempted to destroy declaration with an attached error"); + } const decl = mod.declPtr(decl_index); log.debug("destroy {*} ({s})", .{ decl, decl.name }); _ = mod.test_functions.swapRemove(decl_index); -- cgit v1.2.3 From b12a5cea7559c4cd4666f142b3e05265b07fe323 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Thu, 27 Oct 2022 01:00:33 +0300 Subject: remove test case triggering generic deletion bug This test should be restored once the underlying issue is resolved (with the typo fixed). 
--- src/Module.zig | 1 + ..._instantiation_inherits_parent_branch_quota.zig | 30 ---------------------- 2 files changed, 1 insertion(+), 30 deletions(-) delete mode 100644 test/cases/compile_errors/generic_funciton_instantiation_inherits_parent_branch_quota.zig (limited to 'src') diff --git a/src/Module.zig b/src/Module.zig index 9a395d72c4..a8ea63ffc9 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -3489,6 +3489,7 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const errs = mod.comp.getAllErrorsAlloc() catch break :blk; for (errs.list) |err| Compilation.AllErrors.Message.renderToStdErr(err, .no_color); } + // TODO restore test case triggering this panic @panic("Zig compiler bug: attempted to destroy declaration with an attached error"); } const decl = mod.declPtr(decl_index); diff --git a/test/cases/compile_errors/generic_funciton_instantiation_inherits_parent_branch_quota.zig b/test/cases/compile_errors/generic_funciton_instantiation_inherits_parent_branch_quota.zig deleted file mode 100644 index 1d45ce86db..0000000000 --- a/test/cases/compile_errors/generic_funciton_instantiation_inherits_parent_branch_quota.zig +++ /dev/null @@ -1,30 +0,0 @@ -pub export fn entry1() void { - @setEvalBranchQuota(1001); - // Return type evaluation should inherit both the - // parent's branch quota and count meaning - // at least 2002 backwards branches are required. - comptime var i = 0; - inline while (i < 1000) : (i += 1) {} - _ = simple(10); -} -pub export fn entry2() void { - @setEvalBranchQuota(2001); - comptime var i = 0; - inline while (i < 1000) : (i += 1) {} - _ = simple(10); -} -fn simple(comptime n: usize) Type(n) { - return n; -} -fn Type(comptime n: usize) type { - if (n <= 1) return usize; - return Type(n - 1); -} - -// error -// backend=stage2 -// target=native -// -// :21:16: error: evaluation exceeded 1001 backwards branches -// :21:16: note: use @setEvalBranchQuota() to raise the branch limit from 1001 -// :16:34: note: called from here -- cgit v1.2.3 From dd437ae39948031dc04836f245c8b77d459a428a Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 14:41:22 +0300 Subject: stage2: optimize size of optional slices --- src/codegen/c.zig | 14 +++++++++----- src/codegen/llvm.zig | 10 ++++++++-- src/type.zig | 46 ++++++++-------------------------------------- test/behavior/cast.zig | 4 ++++ test/behavior/optional.zig | 16 ++++++++++++++++ 5 files changed, 45 insertions(+), 45 deletions(-) (limited to 'src') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d6584d75ae..5f6f2fd6d5 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -726,7 +726,11 @@ pub const DeclGen = struct { } if (ty.optionalReprIsPayload()) { - return dg.renderValue(writer, payload_ty, val, location); + if (val.castTag(.opt_payload)) |payload| { + return dg.renderValue(writer, payload_ty, payload.data, location); + } else { + return dg.renderValue(writer, payload_ty, val, location); + } } try writer.writeByte('('); @@ -3263,11 +3267,9 @@ fn airIsNull( try f.writeCValue(writer, operand); const ty = f.air.typeOf(un_op); + const opt_ty = if (deref_suffix[0] != 0) ty.childType() else ty; var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = if (deref_suffix[0] != 0) - ty.childType().optionalChild(&opt_buf) - else - ty.optionalChild(&opt_buf); + const payload_ty = opt_ty.optionalChild(&opt_buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { try writer.print("){s} {s} true;\n", .{ deref_suffix, operator }); @@ -3276,6 +3278,8 @@ fn airIsNull( try 
writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator }); } else if (payload_ty.zigTypeTag() == .ErrorSet) { try writer.print("){s} {s} 0;\n", .{ deref_suffix, operator }); + } else if (payload_ty.isSlice() and opt_ty.optionalReprIsPayload()) { + try writer.print("){s}.ptr {s} NULL;\n", .{ deref_suffix, operator }); } else { try writer.print("){s}.is_null {s} true;\n", .{ deref_suffix, operator }); } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ffc19cb6f6..d4a94d1308 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -6316,18 +6316,24 @@ pub const FuncGen = struct { const operand_ty = self.air.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); + var buf: Type.Payload.ElemType = undefined; + const payload_ty = optional_ty.optionalChild(&buf); if (optional_ty.optionalReprIsPayload()) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; + if (payload_ty.isSlice()) { + const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); + var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; + const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf)); + return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); + } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); } comptime assert(optional_layout_version == 3); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") diff --git a/src/type.zig b/src/type.zig index a2f0bb9e8f..8904b3178d 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3469,20 +3469,8 @@ pub const Type = extern union { if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 }; - switch (child_type.zigTypeTag()) { - .Pointer => { - const ptr_info = child_type.ptrInfo().data; - const has_null = switch (ptr_info.size) { - .Slice, .C => true, - else => ptr_info.@"allowzero", - }; - if (!has_null) { - const ptr_size_bytes = @divExact(target.cpu.arch.ptrBitWidth(), 8); - return AbiSizeAdvanced{ .scalar = ptr_size_bytes }; - } - }, - .ErrorSet => return abiSizeAdvanced(Type.anyerror, target, strat), - else => {}, + if (ty.optionalReprIsPayload()) { + return abiSizeAdvanced(child_type, target, strat); } const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) { @@ -3747,28 +3735,10 @@ pub const Type = extern union { .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); - if (!child_type.hasRuntimeBits()) return 8; - - if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice()) - return target.cpu.arch.ptrBitWidth(); - - // Optional types are represented as a struct with the child type as the first - // field and a boolean as the second. Since the child type's abi alignment is - // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal - // to the child type's ABI alignment. 
- const child_bit_size = try bitSizeAdvanced(child_type, target, sema_kit); - return child_bit_size + 1; - }, - - .error_union => { - const payload = ty.castTag(.error_union).?.data; - if (!payload.payload.hasRuntimeBits()) { - return payload.error_set.bitSizeAdvanced(target, sema_kit); - } - @panic("TODO bitSize error union"); + .optional, .error_union => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, target, if (sema_kit) |sk| .{ .sema_kit = sk } else .eager)).scalar * 8; }, .atomic_order, @@ -4045,8 +4015,8 @@ pub const Type = extern union { .Pointer => { const info = child_ty.ptrInfo().data; switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", + .C => return false, + .Slice, .Many, .One => return !info.@"allowzero", } }, .ErrorSet => return true, diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 9a02e74853..cb76f86820 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1170,6 +1170,7 @@ test "implicitly cast from [N]T to ?[]const T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO try expect(mem.eql(u8, castToOptionalSlice().?, "hi")); comptime try expect(mem.eql(u8, castToOptionalSlice().?, "hi")); @@ -1256,6 +1257,7 @@ test "*const [N]null u8 to ?[]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -1394,6 +1396,8 @@ test "cast i8 fn call peers to i32 result" { test "cast compatible optional types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO var a: ?[:0]const u8 = null; var b: ?[]const u8 = a; diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index eb693147e6..9c9211f777 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -3,6 +3,7 @@ const std = @import("std"); const testing = std.testing; const expect = testing.expect; const expectEqual = testing.expectEqual; +const expectEqualStrings = std.testing.expectEqualStrings; test "passing an optional integer as a parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; @@ -428,3 +429,18 @@ test "alignment of wrapping an optional payload" { }; try expect(S.foo().?.x == 1234); } + +test "Optional slice size is optimized" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + + try expect(@sizeOf(?[]u8) == @sizeOf([]u8)); + var a: ?[]const u8 = null; + try expect(a == null); + 
a = "hello"; + try expectEqualStrings(a.?, "hello"); +} -- cgit v1.2.3 From 78a7bb108ad9f7bf59061675bcae8947d65afc3a Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 14:46:14 +0300 Subject: llvm: handle namespace like packed structs Closes #13159 Closes #13188 --- src/codegen/llvm.zig | 2 +- test/behavior.zig | 1 + test/behavior/bugs/13159.zig | 14 ++++++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 test/behavior/bugs/13159.zig (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d4a94d1308..18b89eef78 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1916,7 +1916,7 @@ pub const Object = struct { if (ty.castTag(.@"struct")) |payload| { const struct_obj = payload.data; - if (struct_obj.layout == .Packed) { + if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { const info = struct_obj.backing_int_ty.intInfo(target); const dwarf_encoding: c_uint = switch (info.signedness) { .signed => DW.ATE.signed, diff --git a/test/behavior.zig b/test/behavior.zig index 2f5d752087..442b27c09f 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -109,6 +109,7 @@ test { _ = @import("behavior/bugs/13128.zig"); _ = @import("behavior/bugs/13164.zig"); _ = @import("behavior/bugs/13171.zig"); + _ = @import("behavior/bugs/13159.zig"); _ = @import("behavior/byteswap.zig"); _ = @import("behavior/byval_arg_var.zig"); _ = @import("behavior/call.zig"); diff --git a/test/behavior/bugs/13159.zig b/test/behavior/bugs/13159.zig new file mode 100644 index 0000000000..6119c498a9 --- /dev/null +++ b/test/behavior/bugs/13159.zig @@ -0,0 +1,14 @@ +const std = @import("std"); +const expect = std.testing.expect; + +const Bar = packed struct { + const Baz = enum { + fizz, + buzz, + }; +}; + +test { + var foo = Bar.Baz.fizz; + try expect(foo == .fizz); +} -- cgit v1.2.3 From d773b7e71f8d9fd3d69c1f90cec9a941fc9f9a12 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 15:16:25 +0300 Subject: translate-c: cleanup unused parameters --- src/translate_c.zig | 182 ++++++++++++++++++++++------------------------------ 1 file changed, 75 insertions(+), 107 deletions(-) (limited to 'src') diff --git a/src/translate_c.zig b/src/translate_c.zig index d71e5f30e2..219635859c 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -224,8 +224,7 @@ const Scope = struct { } } - fn findBlockReturnType(inner: *Scope, c: *Context) clang.QualType { - _ = c; + fn findBlockReturnType(inner: *Scope) clang.QualType { var scope = inner; while (true) { switch (scope.id) { @@ -833,7 +832,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co if (has_init) trans_init: { if (decl_init) |expr| { const node_or_error = if (expr.getStmtClass() == .StringLiteralClass) - transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node) + transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node) else transExprCoercing(c, scope, expr, .used); init_node = node_or_error catch |err| switch (err) { @@ -1319,10 +1318,10 @@ fn transStmt( .StringLiteralClass => return transStringLiteral(c, scope, @ptrCast(*const clang.StringLiteral, stmt), result_used), .ParenExprClass => { const expr = try transExpr(c, scope, @ptrCast(*const clang.ParenExpr, stmt).getSubExpr(), .used); - return maybeSuppressResult(c, scope, result_used, expr); + return maybeSuppressResult(c, result_used, expr); }, .InitListExprClass => return transInitListExpr(c, scope, 
@ptrCast(*const clang.InitListExpr, stmt), result_used), - .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt), result_used), + .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt)), .IfStmtClass => return transIfStmt(c, scope, @ptrCast(*const clang.IfStmt, stmt)), .WhileStmtClass => return transWhileLoop(c, scope, @ptrCast(*const clang.WhileStmt, stmt)), .DoStmtClass => return transDoWhileLoop(c, scope, @ptrCast(*const clang.DoStmt, stmt)), @@ -1332,7 +1331,7 @@ fn transStmt( .ContinueStmtClass => return Tag.@"continue".init(), .BreakStmtClass => return Tag.@"break".init(), .ForStmtClass => return transForLoop(c, scope, @ptrCast(*const clang.ForStmt, stmt)), - .FloatingLiteralClass => return transFloatingLiteral(c, scope, @ptrCast(*const clang.FloatingLiteral, stmt), result_used), + .FloatingLiteralClass => return transFloatingLiteral(c, @ptrCast(*const clang.FloatingLiteral, stmt), result_used), .ConditionalOperatorClass => { return transConditionalOperator(c, scope, @ptrCast(*const clang.ConditionalOperator, stmt), result_used); }, @@ -1356,9 +1355,9 @@ fn transStmt( .OpaqueValueExprClass => { const source_expr = @ptrCast(*const clang.OpaqueValueExpr, stmt).getSourceExpr().?; const expr = try transExpr(c, scope, source_expr, .used); - return maybeSuppressResult(c, scope, result_used, expr); + return maybeSuppressResult(c, result_used, expr); }, - .OffsetOfExprClass => return transOffsetOfExpr(c, scope, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used), + .OffsetOfExprClass => return transOffsetOfExpr(c, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used), .CompoundLiteralExprClass => { const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt); return transExpr(c, scope, compound_literal.getInitializer(), result_used); @@ -1369,13 +1368,13 @@ fn transStmt( }, .ConvertVectorExprClass => { const conv_vec = @ptrCast(*const clang.ConvertVectorExpr, stmt); - const conv_vec_node = try transConvertVectorExpr(c, scope, stmt.getBeginLoc(), conv_vec); - return maybeSuppressResult(c, scope, result_used, conv_vec_node); + const conv_vec_node = try transConvertVectorExpr(c, scope, conv_vec); + return maybeSuppressResult(c, result_used, conv_vec_node); }, .ShuffleVectorExprClass => { const shuffle_vec_expr = @ptrCast(*const clang.ShuffleVectorExpr, stmt); const shuffle_vec_node = try transShuffleVectorExpr(c, scope, shuffle_vec_expr); - return maybeSuppressResult(c, scope, result_used, shuffle_vec_node); + return maybeSuppressResult(c, result_used, shuffle_vec_node); }, .ChooseExprClass => { const choose_expr = @ptrCast(*const clang.ChooseExpr, stmt); @@ -1402,10 +1401,8 @@ fn transStmt( fn transConvertVectorExpr( c: *Context, scope: *Scope, - source_loc: clang.SourceLocation, expr: *const clang.ConvertVectorExpr, ) TransError!Node { - _ = source_loc; const base_stmt = @ptrCast(*const clang.Stmt, expr); var block_scope = try Scope.Block.init(c, scope, true); @@ -1521,12 +1518,7 @@ fn transShuffleVectorExpr( /// Translate a "simple" offsetof expression containing exactly one component, /// when that component is of kind .Field - e.g. 
offsetof(mytype, myfield) -fn transSimpleOffsetOfExpr( - c: *Context, - scope: *Scope, - expr: *const clang.OffsetOfExpr, -) TransError!Node { - _ = scope; +fn transSimpleOffsetOfExpr(c: *Context, expr: *const clang.OffsetOfExpr) TransError!Node { assert(expr.getNumComponents() == 1); const component = expr.getComponent(0); if (component.getKind() == .Field) { @@ -1551,13 +1543,12 @@ fn transSimpleOffsetOfExpr( fn transOffsetOfExpr( c: *Context, - scope: *Scope, expr: *const clang.OffsetOfExpr, result_used: ResultUsed, ) TransError!Node { if (expr.getNumComponents() == 1) { - const offsetof_expr = try transSimpleOffsetOfExpr(c, scope, expr); - return maybeSuppressResult(c, scope, result_used, offsetof_expr); + const offsetof_expr = try transSimpleOffsetOfExpr(c, expr); + return maybeSuppressResult(c, result_used, offsetof_expr); } // TODO implement OffsetOfExpr with more than 1 component @@ -1613,7 +1604,6 @@ fn transCreatePointerArithmeticSignedOp( return transCreateNodeInfixOp( c, - scope, if (is_add) .add else .sub, lhs_node, bitcast_node, @@ -1629,7 +1619,7 @@ fn transBinaryOperator( ) TransError!Node { const op = stmt.getOpcode(); const qt = stmt.getType(); - const isPointerDiffExpr = cIsPointerDiffExpr(c, stmt); + const isPointerDiffExpr = cIsPointerDiffExpr(stmt); switch (op) { .Assign => return try transCreateNodeAssign(c, scope, result_used, stmt.getLHS(), stmt.getRHS()), .Comma => { @@ -1646,7 +1636,7 @@ fn transBinaryOperator( }); try block_scope.statements.append(break_node); const block_node = try block_scope.complete(c); - return maybeSuppressResult(c, scope, result_used, block_node); + return maybeSuppressResult(c, result_used, block_node); }, .Div => { if (cIsSignedInteger(qt)) { @@ -1654,7 +1644,7 @@ fn transBinaryOperator( const lhs = try transExpr(c, scope, stmt.getLHS(), .used); const rhs = try transExpr(c, scope, stmt.getRHS(), .used); const div_trunc = try Tag.div_trunc.create(c.arena, .{ .lhs = lhs, .rhs = rhs }); - return maybeSuppressResult(c, scope, result_used, div_trunc); + return maybeSuppressResult(c, result_used, div_trunc); } }, .Rem => { @@ -1663,7 +1653,7 @@ fn transBinaryOperator( const lhs = try transExpr(c, scope, stmt.getLHS(), .used); const rhs = try transExpr(c, scope, stmt.getRHS(), .used); const rem = try Tag.signed_remainder.create(c.arena, .{ .lhs = lhs, .rhs = rhs }); - return maybeSuppressResult(c, scope, result_used, rem); + return maybeSuppressResult(c, result_used, rem); } }, .Shl => { @@ -1764,7 +1754,7 @@ fn transBinaryOperator( else rhs_uncasted; - const infixOpNode = try transCreateNodeInfixOp(c, scope, op_id, lhs, rhs, result_used); + const infixOpNode = try transCreateNodeInfixOp(c, op_id, lhs, rhs, result_used); if (isPointerDiffExpr) { // @divExact(@bitCast(, @ptrToInt(lhs) -% @ptrToInt(rhs)), @sizeOf()) const ptrdiff_type = try transQualTypeIntWidthOf(c, qt, true); @@ -1843,7 +1833,7 @@ fn transCStyleCastExprClass( src_type, sub_expr_node, )); - return maybeSuppressResult(c, scope, result_used, cast_node); + return maybeSuppressResult(c, result_used, cast_node); } /// The alignment of a variable or field @@ -1933,7 +1923,7 @@ fn transDeclStmtOne( var init_node = if (decl_init) |expr| if (expr.getStmtClass() == .StringLiteralClass) - try transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node) + try transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node) else try transExprCoercing(c, scope, expr, .used) else if (is_static_local) @@ -2051,21 +2041,21 @@ fn 
transImplicitCastExpr( .BitCast, .FloatingCast, .FloatingToIntegral, .IntegralToFloating, .IntegralCast, .PointerToIntegral, .IntegralToPointer => { const sub_expr_node = try transExpr(c, scope, sub_expr, .used); const casted = try transCCast(c, scope, expr.getBeginLoc(), dest_type, src_type, sub_expr_node); - return maybeSuppressResult(c, scope, result_used, casted); + return maybeSuppressResult(c, result_used, casted); }, .LValueToRValue, .NoOp, .FunctionToPointerDecay => { const sub_expr_node = try transExpr(c, scope, sub_expr, .used); - return maybeSuppressResult(c, scope, result_used, sub_expr_node); + return maybeSuppressResult(c, result_used, sub_expr_node); }, .ArrayToPointerDecay => { const sub_expr_node = try transExpr(c, scope, sub_expr, .used); if (exprIsNarrowStringLiteral(sub_expr) or exprIsFlexibleArrayRef(c, sub_expr)) { - return maybeSuppressResult(c, scope, result_used, sub_expr_node); + return maybeSuppressResult(c, result_used, sub_expr_node); } const addr = try Tag.address_of.create(c.arena, sub_expr_node); const casted = try transCPtrCast(c, scope, expr.getBeginLoc(), dest_type, src_type, addr); - return maybeSuppressResult(c, scope, result_used, casted); + return maybeSuppressResult(c, result_used, casted); }, .NullToPointer => { return Tag.null_literal.init(); @@ -2076,18 +2066,18 @@ fn transImplicitCastExpr( const ptr_to_int = try Tag.ptr_to_int.create(c.arena, ptr_node); const ne = try Tag.not_equal.create(c.arena, .{ .lhs = ptr_to_int, .rhs = Tag.zero_literal.init() }); - return maybeSuppressResult(c, scope, result_used, ne); + return maybeSuppressResult(c, result_used, ne); }, .IntegralToBoolean, .FloatingToBoolean => { const sub_expr_node = try transExpr(c, scope, sub_expr, .used); // The expression is already a boolean one, return it as-is if (isBoolRes(sub_expr_node)) - return maybeSuppressResult(c, scope, result_used, sub_expr_node); + return maybeSuppressResult(c, result_used, sub_expr_node); // val != 0 const ne = try Tag.not_equal.create(c.arena, .{ .lhs = sub_expr_node, .rhs = Tag.zero_literal.init() }); - return maybeSuppressResult(c, scope, result_used, ne); + return maybeSuppressResult(c, result_used, ne); }, .BuiltinFnToFnPtr => { return transBuiltinFnExpr(c, scope, sub_expr, result_used); @@ -2140,13 +2130,13 @@ fn transBoolExpr( var res = try transExpr(c, scope, expr, used); if (isBoolRes(res)) { - return maybeSuppressResult(c, scope, used, res); + return maybeSuppressResult(c, used, res); } const ty = getExprQualType(c, expr).getTypePtr(); const node = try finishBoolExpr(c, scope, expr.getBeginLoc(), ty, res, used); - return maybeSuppressResult(c, scope, used, node); + return maybeSuppressResult(c, used, node); } fn exprIsBooleanType(expr: *const clang.Expr) bool { @@ -2299,7 +2289,7 @@ fn transIntegerLiteral( if (suppress_as == .no_as) { const int_lit_node = try transCreateNodeAPInt(c, eval_result.Val.getInt()); - return maybeSuppressResult(c, scope, result_used, int_lit_node); + return maybeSuppressResult(c, result_used, int_lit_node); } // Integer literals in C have types, and this can matter for several reasons. 
@@ -2317,7 +2307,7 @@ fn transIntegerLiteral( const ty_node = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()); const rhs = try transCreateNodeAPInt(c, eval_result.Val.getInt()); const as = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = rhs }); - return maybeSuppressResult(c, scope, result_used, as); + return maybeSuppressResult(c, result_used, as); } fn transReturnStmt( @@ -2329,7 +2319,7 @@ fn transReturnStmt( return Tag.return_void.init(); var rhs = try transExprCoercing(c, scope, val_expr, .used); - const return_qt = scope.findBlockReturnType(c); + const return_qt = scope.findBlockReturnType(); if (isBoolRes(rhs) and !qualTypeIsBoolean(return_qt)) { rhs = try Tag.bool_to_int.create(c.arena, rhs); } @@ -2338,7 +2328,6 @@ fn transReturnStmt( fn transNarrowStringLiteral( c: *Context, - scope: *Scope, stmt: *const clang.StringLiteral, result_used: ResultUsed, ) TransError!Node { @@ -2347,7 +2336,7 @@ fn transNarrowStringLiteral( const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])}); const node = try Tag.string_literal.create(c.arena, str); - return maybeSuppressResult(c, scope, result_used, node); + return maybeSuppressResult(c, result_used, node); } fn transStringLiteral( @@ -2358,18 +2347,18 @@ fn transStringLiteral( ) TransError!Node { const kind = stmt.getKind(); switch (kind) { - .Ascii, .UTF8 => return transNarrowStringLiteral(c, scope, stmt, result_used), + .Ascii, .UTF8 => return transNarrowStringLiteral(c, stmt, result_used), .UTF16, .UTF32, .Wide => { const str_type = @tagName(stmt.getKind()); const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() }); const expr_base = @ptrCast(*const clang.Expr, stmt); const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc()); - const lit_array = try transStringLiteralInitializer(c, scope, stmt, array_type); + const lit_array = try transStringLiteralInitializer(c, stmt, array_type); const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array }); try scope.appendNode(decl); const node = try Tag.identifier.create(c.arena, name); - return maybeSuppressResult(c, scope, result_used, node); + return maybeSuppressResult(c, result_used, node); }, } } @@ -2384,7 +2373,6 @@ fn getArrayPayload(array_type: Node) ast.Payload.Array.ArrayTypeInfo { /// the appropriate length, if necessary. fn transStringLiteralInitializer( c: *Context, - scope: *Scope, stmt: *const clang.StringLiteral, array_type: Node, ) TransError!Node { @@ -2403,7 +2391,7 @@ fn transStringLiteralInitializer( const init_node = if (num_inits > 0) blk: { if (is_narrow) { // "string literal".* or string literal"[0..num_inits].* - var str = try transNarrowStringLiteral(c, scope, stmt, .used); + var str = try transNarrowStringLiteral(c, stmt, .used); if (str_length != array_size) str = try Tag.string_slice.create(c.arena, .{ .string = str, .end = num_inits }); break :blk try Tag.deref.create(c.arena, str); } else { @@ -2440,8 +2428,7 @@ fn transStringLiteralInitializer( /// determine whether `stmt` is a "pointer subtraction expression" - a subtraction where /// both operands resolve to addresses. The C standard requires that both operands /// point to elements of the same array object, but we do not verify that here. 
-fn cIsPointerDiffExpr(c: *Context, stmt: *const clang.BinaryOperator) bool { - _ = c; +fn cIsPointerDiffExpr(stmt: *const clang.BinaryOperator) bool { const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS()); const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS()); return stmt.getOpcode() == .Sub and @@ -2748,9 +2735,7 @@ fn transInitListExprVector( scope: *Scope, loc: clang.SourceLocation, expr: *const clang.InitListExpr, - ty: *const clang.Type, ) TransError!Node { - _ = ty; const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); const vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(qt)); @@ -2829,7 +2814,7 @@ fn transInitListExpr( } if (qual_type.isRecordType()) { - return maybeSuppressResult(c, scope, used, try transInitListExprRecord( + return maybeSuppressResult(c, used, try transInitListExprRecord( c, scope, source_loc, @@ -2837,7 +2822,7 @@ fn transInitListExpr( qual_type, )); } else if (qual_type.isArrayType()) { - return maybeSuppressResult(c, scope, used, try transInitListExprArray( + return maybeSuppressResult(c, used, try transInitListExprArray( c, scope, source_loc, @@ -2845,13 +2830,7 @@ fn transInitListExpr( qual_type, )); } else if (qual_type.isVectorType()) { - return maybeSuppressResult(c, scope, used, try transInitListExprVector( - c, - scope, - source_loc, - expr, - qual_type, - )); + return maybeSuppressResult(c, used, try transInitListExprVector(c, scope, source_loc, expr)); } else { const type_name = try c.str(qual_type.getTypeClassName()); return fail(c, error.UnsupportedType, source_loc, "unsupported initlist type: '{s}'", .{type_name}); @@ -2912,9 +2891,7 @@ fn transImplicitValueInitExpr( c: *Context, scope: *Scope, expr: *const clang.Expr, - used: ResultUsed, ) TransError!Node { - _ = used; const source_loc = expr.getBeginLoc(); const qt = getExprQualType(c, expr); const ty = qt.getTypePtr(); @@ -3354,7 +3331,7 @@ fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()), .rhs = try transCreateNodeAPInt(c, result.Val.getInt()), }); - return maybeSuppressResult(c, scope, used, as_node); + return maybeSuppressResult(c, used, as_node); }, else => |kind| { return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "unsupported constant expression kind '{}'", .{kind}); @@ -3391,7 +3368,7 @@ fn transCharLiteral( try transCreateCharLitNode(c, narrow, val); if (suppress_as == .no_as) { - return maybeSuppressResult(c, scope, result_used, int_lit_node); + return maybeSuppressResult(c, result_used, int_lit_node); } // See comment in `transIntegerLiteral` for why this code is here. 
// @as(T, x) @@ -3400,7 +3377,7 @@ fn transCharLiteral( .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()), .rhs = int_lit_node, }); - return maybeSuppressResult(c, scope, result_used, as_node); + return maybeSuppressResult(c, result_used, as_node); } fn transStmtExpr(c: *Context, scope: *Scope, stmt: *const clang.StmtExpr, used: ResultUsed) TransError!Node { @@ -3426,7 +3403,7 @@ fn transStmtExpr(c: *Context, scope: *Scope, stmt: *const clang.StmtExpr, used: }); try block_scope.statements.append(break_node); const res = try block_scope.complete(c); - return maybeSuppressResult(c, scope, used, res); + return maybeSuppressResult(c, used, res); } fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, result_used: ResultUsed) TransError!Node { @@ -3455,7 +3432,7 @@ fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, re if (exprIsFlexibleArrayRef(c, @ptrCast(*const clang.Expr, stmt))) { node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &.{} }); } - return maybeSuppressResult(c, scope, result_used, node); + return maybeSuppressResult(c, result_used, node); } /// ptr[subscr] (`subscr` is a signed integer expression, `ptr` a pointer) becomes: @@ -3533,7 +3510,7 @@ fn transSignedArrayAccess( const derefed = try Tag.deref.create(c.arena, block_node); - return maybeSuppressResult(c, &block_scope.base, result_used, derefed); + return maybeSuppressResult(c, result_used, derefed); } fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscriptExpr, result_used: ResultUsed) TransError!Node { @@ -3574,7 +3551,7 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip .lhs = container_node, .rhs = rhs, }); - return maybeSuppressResult(c, scope, result_used, node); + return maybeSuppressResult(c, result_used, node); } /// Check if an expression is ultimately a reference to a function declaration @@ -3665,7 +3642,7 @@ fn transCallExpr(c: *Context, scope: *Scope, stmt: *const clang.CallExpr, result } } - return maybeSuppressResult(c, scope, result_used, node); + return maybeSuppressResult(c, result_used, node); } const ClangFunctionType = union(enum) { @@ -3705,14 +3682,13 @@ fn transUnaryExprOrTypeTraitExpr( stmt: *const clang.UnaryExprOrTypeTraitExpr, result_used: ResultUsed, ) TransError!Node { - _ = result_used; const loc = stmt.getBeginLoc(); const type_node = try transQualType(c, scope, stmt.getTypeOfArgument(), loc); const kind = stmt.getKind(); - switch (kind) { - .SizeOf => return Tag.sizeof.create(c.arena, type_node), - .AlignOf => return Tag.alignof.create(c.arena, type_node), + const node = switch (kind) { + .SizeOf => try Tag.sizeof.create(c.arena, type_node), + .AlignOf => try Tag.alignof.create(c.arena, type_node), .PreferredAlignOf, .VecStep, .OpenMPRequiredSimdAlign, @@ -3723,7 +3699,8 @@ fn transUnaryExprOrTypeTraitExpr( "unsupported type trait kind {}", .{kind}, ), - } + }; + return maybeSuppressResult(c, result_used, node); } fn qualTypeHasWrappingOverflow(qt: clang.QualType) bool { @@ -3812,7 +3789,7 @@ fn transCreatePreCrement( // zig: expr += 1 const lhs = try transExpr(c, scope, op_expr, .used); const rhs = Tag.one_literal.init(); - return transCreateNodeInfixOp(c, scope, op, lhs, rhs, .used); + return transCreateNodeInfixOp(c, op, lhs, rhs, .used); } // worst case // c: ++expr @@ -3832,7 +3809,7 @@ fn transCreatePreCrement( const lhs_node = try Tag.identifier.create(c.arena, ref); const ref_node = try Tag.deref.create(c.arena, lhs_node); - const node 
= try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, Tag.one_literal.init(), .used); + const node = try transCreateNodeInfixOp(c, op, ref_node, Tag.one_literal.init(), .used); try block_scope.statements.append(node); const break_node = try Tag.break_val.create(c.arena, .{ @@ -3858,7 +3835,7 @@ fn transCreatePostCrement( // zig: expr += 1 const lhs = try transExpr(c, scope, op_expr, .used); const rhs = Tag.one_literal.init(); - return transCreateNodeInfixOp(c, scope, op, lhs, rhs, .used); + return transCreateNodeInfixOp(c, op, lhs, rhs, .used); } // worst case // c: expr++ @@ -3884,7 +3861,7 @@ fn transCreatePostCrement( const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = ref_node }); try block_scope.statements.append(tmp_decl); - const node = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, Tag.one_literal.init(), .used); + const node = try transCreateNodeInfixOp(c, op, ref_node, Tag.one_literal.init(), .used); try block_scope.statements.append(node); const break_node = try Tag.break_val.create(c.arena, .{ @@ -3965,7 +3942,7 @@ fn transCreateCompoundAssign( else try Tag.div_trunc.create(c.arena, operands); - return transCreateNodeInfixOp(c, scope, .assign, lhs_node, builtin, .used); + return transCreateNodeInfixOp(c, .assign, lhs_node, builtin, .used); } if (is_shift) { @@ -3974,7 +3951,7 @@ fn transCreateCompoundAssign( } else if (requires_int_cast) { rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node); } - return transCreateNodeInfixOp(c, scope, op, lhs_node, rhs_node, .used); + return transCreateNodeInfixOp(c, op, lhs_node, rhs_node, .used); } // worst case // c: lhs += rhs @@ -4005,7 +3982,7 @@ fn transCreateCompoundAssign( else try Tag.div_trunc.create(c.arena, operands); - const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, ref_node, builtin, .used); + const assign = try transCreateNodeInfixOp(c, .assign, ref_node, builtin, .used); try block_scope.statements.append(assign); } else { if (is_shift) { @@ -4015,7 +3992,7 @@ fn transCreateCompoundAssign( rhs_node = try transCCast(c, &block_scope.base, loc, lhs_qt, rhs_qt, rhs_node); } - const assign = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, rhs_node, .used); + const assign = try transCreateNodeInfixOp(c, op, ref_node, rhs_node, .used); try block_scope.statements.append(assign); } @@ -4071,7 +4048,7 @@ fn transCPtrCast( } } -fn transFloatingLiteral(c: *Context, scope: *Scope, expr: *const clang.FloatingLiteral, used: ResultUsed) TransError!Node { +fn transFloatingLiteral(c: *Context, expr: *const clang.FloatingLiteral, used: ResultUsed) TransError!Node { switch (expr.getRawSemantics()) { .IEEEhalf, // f16 .IEEEsingle, // f32 @@ -4095,7 +4072,7 @@ fn transFloatingLiteral(c: *Context, scope: *Scope, expr: *const clang.FloatingL try std.fmt.allocPrint(c.arena, "{d}", .{dbl}); var node = try Tag.float_literal.create(c.arena, str); if (is_negative) node = try Tag.negate.create(c.arena, node); - return maybeSuppressResult(c, scope, used, node); + return maybeSuppressResult(c, used, node); } fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node { @@ -4151,7 +4128,7 @@ fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang }); try block_scope.statements.append(break_node); const res = try block_scope.complete(c); - return maybeSuppressResult(c, scope, used, res); + return maybeSuppressResult(c, used, res); } fn 
transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.ConditionalOperator, used: ResultUsed) TransError!Node { @@ -4191,13 +4168,7 @@ fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.Condi return if_node; } -fn maybeSuppressResult( - c: *Context, - scope: *Scope, - used: ResultUsed, - result: Node, -) TransError!Node { - _ = scope; +fn maybeSuppressResult(c: *Context, used: ResultUsed, result: Node) TransError!Node { if (used == .used) return result; return Tag.discard.create(c.arena, .{ .should_skip = false, .value = result }); } @@ -4551,7 +4522,7 @@ fn transCreateNodeAssign( if (!exprIsBooleanType(lhs) and isBoolRes(rhs_node)) { rhs_node = try Tag.bool_to_int.create(c.arena, rhs_node); } - return transCreateNodeInfixOp(c, scope, .assign, lhs_node, rhs_node, .used); + return transCreateNodeInfixOp(c, .assign, lhs_node, rhs_node, .used); } // worst case @@ -4571,7 +4542,7 @@ fn transCreateNodeAssign( const lhs_node = try transExpr(c, &block_scope.base, lhs, .used); const tmp_ident = try Tag.identifier.create(c.arena, tmp); - const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, lhs_node, tmp_ident, .used); + const assign = try transCreateNodeInfixOp(c, .assign, lhs_node, tmp_ident, .used); try block_scope.statements.append(assign); const break_node = try Tag.break_val.create(c.arena, .{ @@ -4584,7 +4555,6 @@ fn transCreateNodeAssign( fn transCreateNodeInfixOp( c: *Context, - scope: *Scope, op: Tag, lhs: Node, rhs: Node, @@ -4598,7 +4568,7 @@ fn transCreateNodeInfixOp( .rhs = rhs, }, }; - return maybeSuppressResult(c, scope, used, Node.initPayload(&payload.base)); + return maybeSuppressResult(c, used, Node.initPayload(&payload.base)); } fn transCreateNodeBoolInfixOp( @@ -4613,7 +4583,7 @@ fn transCreateNodeBoolInfixOp( const lhs = try transBoolExpr(c, scope, stmt.getLHS(), .used); const rhs = try transBoolExpr(c, scope, stmt.getRHS(), .used); - return transCreateNodeInfixOp(c, scope, op, lhs, rhs, used); + return transCreateNodeInfixOp(c, op, lhs, rhs, used); } fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node { @@ -4730,7 +4700,7 @@ fn transCreateNodeShiftOp( const rhs = try transExprCoercing(c, scope, rhs_expr, .used); const rhs_casted = try Tag.int_cast.create(c.arena, .{ .lhs = rhs_type, .rhs = rhs }); - return transCreateNodeInfixOp(c, scope, op, lhs, rhs_casted, used); + return transCreateNodeInfixOp(c, op, lhs, rhs_casted, used); } fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clang.SourceLocation) TypeError!Node { @@ -6298,7 +6268,7 @@ fn parseCCastExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { // allow_fail is set when unsure if we are parsing a type-name fn parseCTypeName(c: *Context, m: *MacroCtx, scope: *Scope, allow_fail: bool) ParseError!?Node { if (try parseCSpecifierQualifierList(c, m, scope, allow_fail)) |node| { - return try parseCAbstractDeclarator(c, m, scope, node); + return try parseCAbstractDeclarator(c, m, node); } else { return null; } @@ -6327,7 +6297,7 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_ .Keyword_complex, => { m.i -= 1; - return try parseCNumericType(c, m, scope); + return try parseCNumericType(c, m); }, .Keyword_enum, .Keyword_struct, .Keyword_union => { // struct Foo will be declared as struct_Foo by transRecordDecl @@ -6349,8 +6319,7 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_ } } -fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) 
ParseError!Node { - _ = scope; +fn parseCNumericType(c: *Context, m: *MacroCtx) ParseError!Node { const KwCounter = struct { double: u8 = 0, long: u8 = 0, @@ -6451,8 +6420,7 @@ fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { return error.ParseError; } -fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, scope: *Scope, node: Node) ParseError!Node { - _ = scope; +fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, node: Node) ParseError!Node { switch (m.next().?) { .Asterisk => { // last token of `node` -- cgit v1.2.3 From 4fc944dde813638410850515b0d1b156e5b6e920 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 15:17:01 +0300 Subject: translate-c: fix redefinition of label on left recursive comma operator Closes #13239 --- src/translate_c.zig | 7 ++++--- test/behavior/translate_c_macros.h | 1 + test/behavior/translate_c_macros.zig | 1 + test/translate_c.zig | 8 ++++---- 4 files changed, 10 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/translate_c.zig b/src/translate_c.zig index 219635859c..7cc843e17c 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -5651,13 +5651,14 @@ const ParseError = Error || error{ParseError}; fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { // TODO parseCAssignExpr here - const node = try parseCCondExpr(c, m, scope); + var block_scope = try Scope.Block.init(c, scope, true); + defer block_scope.deinit(); + + const node = try parseCCondExpr(c, m, &block_scope.base); if (m.next().? != .Comma) { m.i -= 1; return node; } - var block_scope = try Scope.Block.init(c, scope, true); - defer block_scope.deinit(); var last = node; while (true) { diff --git a/test/behavior/translate_c_macros.h b/test/behavior/translate_c_macros.h index 439577fecc..5d4cf3473d 100644 --- a/test/behavior/translate_c_macros.h +++ b/test/behavior/translate_c_macros.h @@ -40,6 +40,7 @@ union U { #define CAST_OR_CALL_WITH_PARENS(type_or_fn, val) ((type_or_fn)(val)) #define NESTED_COMMA_OPERATOR (1, (2, 3)) +#define NESTED_COMMA_OPERATOR_LHS (1, 2), 3 #include #if !defined(__UINTPTR_MAX__) diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig index 04d217f488..deda45df91 100644 --- a/test/behavior/translate_c_macros.zig +++ b/test/behavior/translate_c_macros.zig @@ -100,6 +100,7 @@ test "nested comma operator" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try expectEqual(@as(c_int, 3), h.NESTED_COMMA_OPERATOR); + try expectEqual(@as(c_int, 3), h.NESTED_COMMA_OPERATOR_LHS); } test "cast functions" { diff --git a/test/translate_c.zig b/test/translate_c.zig index d6b6bcbbba..81d1308c95 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -499,20 +499,20 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\int baz(int x, int y) { return 0; } \\#define bar(x) (&x, +3, 4 == 4, 5 * 6, baz(1, 2), 2 % 2, baz(1,2)) , &[_][]const u8{ - \\pub const foo = blk: { + \\pub const foo = blk_1: { \\ _ = @TypeOf(foo); - \\ break :blk bar; + \\ break :blk_1 bar; \\}; , \\pub inline fn bar(x: anytype) @TypeOf(baz(@as(c_int, 1), @as(c_int, 2))) { - \\ return blk: { + \\ return blk_1: { \\ _ = &x; \\ _ = @as(c_int, 3); \\ _ = @as(c_int, 4) == @as(c_int, 4); \\ _ = @as(c_int, 5) * @as(c_int, 6); \\ _ = baz(@as(c_int, 1), @as(c_int, 2)); \\ _ = @as(c_int, 2) % @as(c_int, 2); - \\ break :blk baz(@as(c_int, 1), @as(c_int, 2)); + \\ break :blk_1 baz(@as(c_int, 1), @as(c_int, 2)); \\ }; \\} }); -- cgit v1.2.3 From 
4ac8ec4c5c80f6eca0ac7d7955c5486ef55ce042 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 17:30:47 +0300 Subject: AstGen: fix `ref`ing inferred allocs Closes #13285 --- src/AstGen.zig | 9 ++++++++- test/behavior.zig | 3 ++- test/behavior/bugs/13285.zig | 11 +++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 test/behavior/bugs/13285.zig (limited to 'src') diff --git a/src/AstGen.zig b/src/AstGen.zig index 07a972eaab..48e6a480f3 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -9709,7 +9709,7 @@ fn rvalue( const result_index = refToIndex(result) orelse return gz.addUnTok(.ref, result, src_token); const zir_tags = gz.astgen.instructions.items(.tag); - if (zir_tags[result_index].isParam()) + if (zir_tags[result_index].isParam() or astgen.isInferred(result)) return gz.addUnTok(.ref, result, src_token); const gop = try astgen.ref_table.getOrPut(astgen.gpa, result_index); if (!gop.found_existing) { @@ -12196,6 +12196,13 @@ fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool { .alloc_inferred_comptime_mut, => true, + .extended => { + const zir_data = astgen.instructions.items(.data); + if (zir_data[inst].extended.opcode != .alloc) return false; + const small = @bitCast(Zir.Inst.AllocExtended.Small, zir_data[inst].extended.small); + return !small.has_type; + }, + else => false, }; } diff --git a/test/behavior.zig b/test/behavior.zig index 442b27c09f..aa59fb32b0 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -108,8 +108,9 @@ test { _ = @import("behavior/bugs/13112.zig"); _ = @import("behavior/bugs/13128.zig"); _ = @import("behavior/bugs/13164.zig"); - _ = @import("behavior/bugs/13171.zig"); _ = @import("behavior/bugs/13159.zig"); + _ = @import("behavior/bugs/13171.zig"); + _ = @import("behavior/bugs/13285.zig"); _ = @import("behavior/byteswap.zig"); _ = @import("behavior/byval_arg_var.zig"); _ = @import("behavior/call.zig"); diff --git a/test/behavior/bugs/13285.zig b/test/behavior/bugs/13285.zig new file mode 100644 index 0000000000..ad37f9876a --- /dev/null +++ b/test/behavior/bugs/13285.zig @@ -0,0 +1,11 @@ +const Crasher = struct { + lets_crash: u64 = 0, +}; + +test { + var a: Crasher = undefined; + var crasher_ptr = &a; + var crasher_local = crasher_ptr.*; + const crasher_local_ptr = &crasher_local; + crasher_local_ptr.lets_crash = 1; +} -- cgit v1.2.3 From 9dcfc829e650bc9c0a89e9f7778744c774120c09 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 17:47:32 +0300 Subject: Sema: fix some edge cases with error return traces and typeof blocks Closes #13293 --- src/Sema.zig | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/Sema.zig b/src/Sema.zig index 4c2f72034e..9c52fd91a3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5031,6 +5031,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErro .label = &label, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, + .is_typeof = parent_block.is_typeof, .want_safety = parent_block.want_safety, .float_mode = parent_block.float_mode, .runtime_cond = parent_block.runtime_cond, @@ -5923,7 +5924,7 @@ fn zirCall( const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm; if (backend_supports_error_return_tracing and sema.mod.comp.bin_file.options.error_return_tracing and - !block.is_comptime and (input_is_error or pop_error_return_trace)) + !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace)) { const call_inst: 
Air.Inst.Ref = if (modifier == .always_tail) undefined else b: { break :b try sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src); @@ -6403,7 +6404,7 @@ fn analyzeCall( } const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info); - if (!is_comptime_call) { + if (!is_comptime_call and !block.is_typeof) { try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); @@ -6441,7 +6442,7 @@ fn analyzeCall( break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); }; - if (!is_comptime_call and sema.typeOf(result).zigTypeTag() != .NoReturn) { + if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) { try sema.emitDbgInline( block, module_fn, @@ -10210,6 +10211,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .label = &label, .inlining = block.inlining, .is_comptime = block.is_comptime, + .is_typeof = block.is_typeof, .switch_else_err_ty = else_error_ty, .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, @@ -16401,7 +16403,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (!ok) return; // This is only relevant at runtime. - if (block.is_comptime) return; + if (block.is_comptime or block.is_typeof) return; // This is only relevant within functions. if (sema.func == null) return; @@ -16421,7 +16423,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) const src = sema.src; // TODO // This is only relevant at runtime. - if (start_block.is_comptime) return; + if (start_block.is_comptime or start_block.is_typeof) return; const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm; const ok = sema.owner_func.?.calls_or_awaits_errorable_fn and -- cgit v1.2.3 From d9fe5ba7f805c82f14c162945ac851ebb570ec89 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 24 Oct 2022 22:19:24 +0300 Subject: Sema: add error for too big packed struct --- src/Sema.zig | 27 ++++++++++++++++++++++ .../cases/compile_errors/too_big_packed_struct.zig | 13 +++++++++++ 2 files changed, 40 insertions(+) create mode 100644 test/cases/compile_errors/too_big_packed_struct.zig (limited to 'src') diff --git a/src/Sema.zig b/src/Sema.zig index 9c52fd91a3..d1a558d15b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -29075,6 +29075,33 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator); try wip_captures.finalize(); } else { + if (fields_bit_sum > std.math.maxInt(u16)) { + var sema: Sema = .{ + .mod = mod, + .gpa = gpa, + .arena = undefined, + .perm_arena = decl_arena_allocator, + .code = zir, + .owner_decl = decl, + .owner_decl_index = decl_index, + .func = null, + .fn_ret_ty = Type.void, + .owner_func = null, + }; + defer sema.deinit(); + + var block: Block = .{ + .parent = null, + .sema = &sema, + .src_decl = decl_index, + .namespace = &struct_obj.namespace, + .wip_capture_scope = undefined, + .instructions = .{}, + .inlining = null, + .is_comptime = true, + }; + return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); + } var buf: Type.Payload.Bits = .{ .base = .{ .tag = .int_unsigned }, .data = @intCast(u16, fields_bit_sum), diff --git 
a/test/cases/compile_errors/too_big_packed_struct.zig b/test/cases/compile_errors/too_big_packed_struct.zig new file mode 100644 index 0000000000..bedc4a72a6 --- /dev/null +++ b/test/cases/compile_errors/too_big_packed_struct.zig @@ -0,0 +1,13 @@ +pub export fn entry() void { + const T = packed struct { + a: u65535, + b: u65535, + }; + @compileLog(@sizeOf(T)); +} + +// error +// backend=stage2 +// target=native +// +// :2:22: error: size of packed struct '131070' exceeds maximum bit width of 65535 -- cgit v1.2.3 From 595ccecd88d82874e7f677ee1809c04fc29424ff Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Tue, 25 Oct 2022 16:10:09 +0300 Subject: llvm: do not return undefined pointers from array_to_slice --- src/codegen/llvm.zig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 18b89eef78..2eb95dc5e3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5434,10 +5434,11 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(), .False); const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); + const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime()) { - return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, ""); + const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); + return self.builder.buildInsertValue(partial, len, 1, ""); } - const operand = try self.resolveInst(ty_op.operand); const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), llvm_usize.constNull(), }; -- cgit v1.2.3 From f3a3fb3d880528e8b6404648c605ed092ef1412c Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Thu, 27 Oct 2022 00:23:43 +0300 Subject: llvm: pass optional slices like regular slices --- src/codegen/llvm.zig | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 2eb95dc5e3..2660b19bf3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1027,7 +1027,9 @@ pub const Object = struct { dg.addArgAttr(llvm_func, llvm_arg_i, "noalias"); } } - dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); + if (param_ty.zigTypeTag() != .Optional) { + dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); + } if (!ptr_info.mutable) { dg.addArgAttr(llvm_func, llvm_arg_i, "readonly"); } @@ -3117,7 +3119,11 @@ pub const DeclGen = struct { .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = param_ty.slicePtrFieldType(&buf); + var opt_buf: Type.Payload.ElemType = undefined; + const ptr_ty = if (param_ty.zigTypeTag() == .Optional) + param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) + else + param_ty.slicePtrFieldType(&buf); const ptr_llvm_ty = try dg.lowerType(ptr_ty); const len_llvm_ty = try dg.lowerType(Type.usize); @@ -10358,7 +10364,8 @@ const ParamTypeIterator = struct { .Unspecified, .Inline => { it.zig_index += 1; it.llvm_index += 1; - if (ty.isSlice()) { + var buf: Type.Payload.ElemType = undefined; + if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) { return .slice; } else if (isByRef(ty)) { return .byref; -- cgit v1.2.3 From b937a045607deae158ccb6a00f5defaf36510e61 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Thu, 27 Oct 2022 00:48:56 +0300 Subject: Sema: check `coerceInMemoryAllowed` earlier in 
`resolvePeerTypes` Closes #13310 --- src/Sema.zig | 21 +++++++++------------ test/behavior/cast.zig | 7 +++++++ 2 files changed, 16 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/Sema.zig b/src/Sema.zig index d1a558d15b..b41dd21b81 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28340,8 +28340,16 @@ fn resolvePeerTypes( const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(); const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(); - if (candidate_ty.eql(chosen_ty, sema.mod)) + // If the candidate can coerce into our chosen type, we're done. + // If the chosen type can coerce into the candidate, use that. + if ((try sema.coerceInMemoryAllowed(block, chosen_ty, candidate_ty, false, target, src, src)) == .ok) { + continue; + } + if ((try sema.coerceInMemoryAllowed(block, candidate_ty, chosen_ty, false, target, src, src)) == .ok) { + chosen = candidate; + chosen_i = candidate_i + 1; continue; + } switch (candidate_ty_tag) { .NoReturn, .Undefined => continue, @@ -28741,17 +28749,6 @@ fn resolvePeerTypes( else => {}, } - // If the candidate can coerce into our chosen type, we're done. - // If the chosen type can coerce into the candidate, use that. - if ((try sema.coerceInMemoryAllowed(block, chosen_ty, candidate_ty, false, target, src, src)) == .ok) { - continue; - } - if ((try sema.coerceInMemoryAllowed(block, candidate_ty, chosen_ty, false, target, src, src)) == .ok) { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - // At this point, we hit a compile error. We need to recover // the source locations. const chosen_src = candidate_srcs.resolve( diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index cb76f86820..8473abc3ef 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1444,3 +1444,10 @@ test "coerce between pointers of compatible differently-named floats" { f2.* += 1; try expect(f1 == @as(F, 12.34) + 1); } + +test "peer type resolution of const and non-const pointer to array" { + const a = @intToPtr(*[1024]u8, 42); + const b = @intToPtr(*const [1024]u8, 42); + try std.testing.expect(@TypeOf(a, b) == *const [1024]u8); + try std.testing.expect(a == b); +} -- cgit v1.2.3 From 710e2e7f1048cb7d5a7c8d2bdd954f108dd94e7a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 26 Oct 2022 12:56:29 -0700 Subject: libcxx: only pass -DHAVE___CXA_THREAD_ATEXIT_IMPL for glibc This definition communicates to libcxxabi that the libc will provide the `__cxa_thread_atexit_impl` symbol. This is true for glibc but not true for other libcs, such as musl. 
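For reference, the change reduces the flag logic in buildLibCXXABI to the following sketch (condensed from the hunk below; the preceding single-threaded branch is unchanged and elided here):

    // Only glibc provides __cxa_thread_atexit_impl; other libcs such as musl
    // must fall back to libcxxabi's own __cxa_thread_atexit implementation,
    // so this define is now passed only when targeting a GNU ABI.
    if (target.abi.isGnu()) {
        try cflags.append("-DHAVE___CXA_THREAD_ATEXIT_IMPL");
    }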
--- src/libcxx.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/libcxx.zig b/src/libcxx.zig index b0261aaed6..850da698c5 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -320,7 +320,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void { } try cflags.append("-D_LIBCXXABI_HAS_NO_THREADS"); try cflags.append("-D_LIBCPP_HAS_NO_THREADS"); - } else { + } else if (target.abi.isGnu()) { try cflags.append("-DHAVE___CXA_THREAD_ATEXIT_IMPL"); } -- cgit v1.2.3 From 648d34d8eacaf2e35e336abd5b0c50c2ab9bfc94 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Thu, 27 Oct 2022 13:40:20 +0300 Subject: Sema: ensure zero-bit generic args are coerced properly Closes #13307 --- src/Sema.zig | 2 ++ .../zero-bit_generic_args_are_coerced_to_param_type.zig | 10 ++++++++++ 2 files changed, 12 insertions(+) create mode 100644 test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig (limited to 'src') diff --git a/src/Sema.zig b/src/Sema.zig index b41dd21b81..43c8b88372 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6738,6 +6738,8 @@ fn analyzeGenericCallArg( try sema.queueFullTypeResolution(param_ty); runtime_args[runtime_i.*] = casted_arg; runtime_i.* += 1; + } else if (try sema.typeHasOnePossibleValue(block, arg_src, comptime_arg.ty)) |_| { + _ = try sema.coerce(block, comptime_arg.ty, uncasted_arg, arg_src); } } diff --git a/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig b/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig new file mode 100644 index 0000000000..0288979084 --- /dev/null +++ b/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig @@ -0,0 +1,10 @@ +fn bar(a: anytype, _: @TypeOf(a)) void {} +pub export fn entry() void { + bar(@as(u0, 0), "fooo"); +} + +// error +// backend=stage2 +// target=native +// +// :3:21: error: expected type 'u0', found '*const [4:0]u8' -- cgit v1.2.3 From d03c47bf85b17f7727d2f1fe5bd497b311c9eba7 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 26 Oct 2022 00:30:17 +0300 Subject: Sema: use `runtime_value` instead of creating allocs --- src/Sema.zig | 76 +++++++++++++++++++++------------------- src/TypedValue.zig | 2 +- src/arch/aarch64/CodeGen.zig | 6 +++- src/arch/arm/CodeGen.zig | 6 +++- src/arch/wasm/CodeGen.zig | 6 +++- src/arch/x86_64/CodeGen.zig | 6 +++- src/codegen.zig | 7 +++- src/codegen/c.zig | 6 +++- src/codegen/llvm.zig | 6 +++- src/value.zig | 19 +++++------ test/behavior/bugs/13164.zig | 1 + test/behavior/vector.zig | 37 +++++++++++++++++++++ 12 files changed, 126 insertions(+), 52 deletions(-) (limited to 'src') diff --git a/src/Sema.zig b/src/Sema.zig index 4c2f72034e..931e06724b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1827,6 +1827,22 @@ fn resolveMaybeUndefValAllowVariables( block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref, +) CompileError!?Value { + var make_runtime = false; + if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, src, inst, &make_runtime)) |val| { + if (make_runtime) return null; + return val; + } + return null; +} + +/// Returns all Value tags including `variable`, `undef` and `runtime_value`. +fn resolveMaybeUndefValAllowVariablesMaybeRuntime( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + inst: Air.Inst.Ref, + make_runtime: *bool, ) CompileError!?Value { // First section of indexes correspond to a set number of constant values.
var i: usize = @enumToInt(inst); @@ -1843,7 +1859,7 @@ fn resolveMaybeUndefValAllowVariables( .constant => { const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .runtime_int) return null; + if (val.tag() == .runtime_value) make_runtime.* = true; return val; }, .const_ty => { @@ -3896,6 +3912,7 @@ fn validateUnionInit( var first_block_index = block.instructions.items.len; var block_index = block.instructions.items.len - 1; var init_val: ?Value = null; + var make_runtime = false; while (block_index > 0) : (block_index -= 1) { const store_inst = block.instructions.items[block_index]; if (store_inst == field_ptr_air_inst) break; @@ -3920,7 +3937,7 @@ fn validateUnionInit( } else { first_block_index = @min(first_block_index, block_index); } - init_val = try sema.resolveMaybeUndefValAllowVariables(block, init_src, bin_op.rhs); + init_val = try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, init_src, bin_op.rhs, &make_runtime); break; } @@ -3933,10 +3950,11 @@ fn validateUnionInit( // instead a single `store` to the result ptr with a comptime union value. block.instructions.shrinkRetainingCapacity(first_block_index); - const union_val = try Value.Tag.@"union".create(sema.arena, .{ + var union_val = try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val, }); + if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val); const union_init = try sema.addConstant(union_ty, union_val); try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store); return; @@ -4054,6 +4072,7 @@ fn validateStructInit( var struct_is_comptime = true; var first_block_index = block.instructions.items.len; + var make_runtime = false; const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4130,7 +4149,7 @@ fn validateStructInit( } else { first_block_index = @min(first_block_index, block_index); } - if (try sema.resolveMaybeUndefValAllowVariables(block, field_src, bin_op.rhs)) |val| { + if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, field_src, bin_op.rhs, &make_runtime)) |val| { field_values[i] = val; } else { struct_is_comptime = false; @@ -4185,7 +4204,8 @@ fn validateStructInit( // instead a single `store` to the struct_ptr with a comptime struct value. block.instructions.shrinkRetainingCapacity(first_block_index); - const struct_val = try Value.Tag.aggregate.create(sema.arena, field_values); + var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values); + if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val); const struct_init = try sema.addConstant(struct_ty, struct_val); try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; @@ -4265,6 +4285,7 @@ fn zirValidateArrayInit( var array_is_comptime = true; var first_block_index = block.instructions.items.len; + var make_runtime = false; // Collect the comptime element values in case the array literal ends up // being comptime-known. 
@@ -4326,7 +4347,7 @@ fn zirValidateArrayInit( array_is_comptime = false; continue; } - if (try sema.resolveMaybeUndefValAllowVariables(block, elem_src, bin_op.rhs)) |val| { + if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, elem_src, bin_op.rhs, &make_runtime)) |val| { element_vals[i] = val; } else { array_is_comptime = false; @@ -4352,7 +4373,7 @@ fn zirValidateArrayInit( array_is_comptime = false; continue; } - if (try sema.resolveMaybeUndefValAllowVariables(block, elem_src, bin_op.rhs)) |val| { + if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, elem_src, bin_op.rhs, &make_runtime)) |val| { element_vals[i] = val; } else { array_is_comptime = false; @@ -4383,7 +4404,8 @@ fn zirValidateArrayInit( block.instructions.shrinkRetainingCapacity(first_block_index); - const array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); + var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); + if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val); const array_init = try sema.addConstant(array_ty, array_val); try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); } @@ -6635,20 +6657,14 @@ fn analyzeInlineCallArg( .ty = param_ty, .val = arg_val, }; - } else if (((try sema.resolveMaybeUndefVal(arg_block, arg_src, casted_arg)) == null) or - try sema.typeRequiresComptime(param_ty) or zir_tags[inst] == .param_comptime) - { + } else if (zir_tags[inst] == .param_comptime or try sema.typeRequiresComptime(param_ty)) { try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg); - } else { + } else if (try sema.resolveMaybeUndefVal(arg_block, arg_src, casted_arg)) |val| { // We have a comptime value but we need a runtime value to preserve inlining semantics, - const ptr_type = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = param_ty, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), - }); - const alloc = try arg_block.addTy(.alloc, ptr_type); - _ = try arg_block.addBinOp(.store, alloc, casted_arg); - const loaded = try arg_block.addTyOp(.load, param_ty, alloc); - try sema.inst_map.putNoClobber(sema.gpa, inst, loaded); + const wrapped = try sema.addConstant(param_ty, try Value.Tag.runtime_value.create(sema.arena, val)); + try sema.inst_map.putNoClobber(sema.gpa, inst, wrapped); + } else { + try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg); } arg_i.* += 1; @@ -6685,20 +6701,14 @@ fn analyzeInlineCallArg( .ty = sema.typeOf(uncasted_arg), .val = arg_val, }; - } else if ((try sema.resolveMaybeUndefVal(arg_block, arg_src, uncasted_arg)) == null or - try sema.typeRequiresComptime(param_ty) or zir_tags[inst] == .param_anytype_comptime) - { + } else if (zir_tags[inst] == .param_anytype_comptime or try sema.typeRequiresComptime(param_ty)) { try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg); - } else { + } else if (try sema.resolveMaybeUndefVal(arg_block, arg_src, uncasted_arg)) |val| { // We have a comptime value but we need a runtime value to preserve inlining semantics, - const ptr_type = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = param_ty, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), - }); - const alloc = try arg_block.addTy(.alloc, ptr_type); - _ = try arg_block.addBinOp(.store, alloc, uncasted_arg); - const loaded = try arg_block.addTyOp(.load, param_ty, alloc); - try sema.inst_map.putNoClobber(sema.gpa, inst, loaded); + const wrapped = try sema.addConstant(param_ty, try 
Value.Tag.runtime_value.create(sema.arena, val)); + try sema.inst_map.putNoClobber(sema.gpa, inst, wrapped); + } else { + try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg); } arg_i.* += 1; @@ -14826,7 +14836,7 @@ fn zirBuiltinSrc( // fn_name: [:0]const u8, field_values[1] = func_name_val; // line: u32 - field_values[2] = try Value.Tag.runtime_int.create(sema.arena, extra.line + 1); + field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1)); // column: u32, field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index ba32e55f1e..619fb003f9 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -477,6 +477,6 @@ pub fn print( }, .generic_poison_type => return writer.writeAll("(generic poison type)"), .generic_poison => return writer.writeAll("(generic poison)"), - .runtime_int => return writer.writeAll("[runtime value]"), + .runtime_value => return writer.writeAll("[runtime value]"), }; } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index eb8ca8e8f1..3bb5bbe0d3 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -5401,7 +5401,11 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue { } } -fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { +fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { + var typed_value = arg_tv; + if (typed_value.val.castTag(.runtime_value)) |rt| { + typed_value.val = rt.data; + } log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() }); if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 1ebc348fc2..67cf899dc3 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -6047,7 +6047,11 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue { } } -fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { +fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { + var typed_value = arg_tv; + if (typed_value.val.castTag(.runtime_value)) |rt| { + typed_value.val = rt.data; + } log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() }); if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 538fcb13c1..69d5e38f65 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2582,7 +2582,11 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( return @intCast(WantedT, result); } -fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { +fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { + var val = arg_val; + if (val.castTag(.runtime_value)) |rt| { + val = rt.data; + } if (val.isUndefDeep()) return func.emitUndefined(ty); if (val.castTag(.decl_ref)) |decl_ref| { const decl_index = decl_ref.data; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 5f793aaeb9..965a34251c 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6960,7 +6960,11 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue { } } -fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { +fn genTypedValue(self: *Self, arg_tv: TypedValue) 
InnerError!MCValue { + var typed_value = arg_tv; + if (typed_value.val.castTag(.runtime_value)) |rt| { + typed_value.val = rt.data; + } log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() }); if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; diff --git a/src/codegen.zig b/src/codegen.zig index e7f927a2d6..757bd23b38 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -149,7 +149,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian pub fn generateSymbol( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, + arg_tv: TypedValue, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, @@ -157,6 +157,11 @@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); + var typed_value = arg_tv; + if (arg_tv.val.castTag(.runtime_value)) |rt| { + typed_value.val = rt.data; + } + const target = bin_file.options.target; const endian = target.cpu.arch.endian(); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d6584d75ae..d0f76f0390 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -555,9 +555,13 @@ pub const DeclGen = struct { dg: *DeclGen, writer: anytype, ty: Type, - val: Value, + arg_val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { + var val = arg_val; + if (val.castTag(.runtime_value)) |rt| { + val = rt.data; + } const target = dg.module.getTarget(); if (val.isUndefDeep()) { switch (ty.zigTypeTag()) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ffc19cb6f6..938770629f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3187,7 +3187,11 @@ pub const DeclGen = struct { return llvm_elem_ty; } - fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*llvm.Value { + fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value { + var tv = arg_tv; + if (tv.val.castTag(.runtime_value)) |rt| { + tv.val = rt.data; + } if (tv.val.isUndef()) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); diff --git a/src/value.zig b/src/value.zig index d24c5a1c17..28601c1723 100644 --- a/src/value.zig +++ b/src/value.zig @@ -111,10 +111,12 @@ pub const Value = extern union { int_i64, int_big_positive, int_big_negative, - runtime_int, function, extern_fn, variable, + /// A wrapper for values which are comptime-known but should + /// semantically be runtime-known. + runtime_value, /// Represents a pointer to a Decl. /// When machine codegen backend sees this, it must set the Decl's `alive` field to true. 
decl_ref, @@ -282,6 +284,7 @@ pub const Value = extern union { .eu_payload, .opt_payload, .empty_array_sentinel, + .runtime_value, => Payload.SubValue, .eu_payload_ptr, @@ -305,7 +308,6 @@ pub const Value = extern union { .int_type => Payload.IntType, .int_u64 => Payload.U64, .int_i64 => Payload.I64, - .runtime_int => Payload.U64, .function => Payload.Function, .variable => Payload.Variable, .decl_ref_mut => Payload.DeclRefMut, @@ -485,7 +487,6 @@ pub const Value = extern union { }, .int_type => return self.copyPayloadShallow(arena, Payload.IntType), .int_u64 => return self.copyPayloadShallow(arena, Payload.U64), - .runtime_int => return self.copyPayloadShallow(arena, Payload.U64), .int_i64 => return self.copyPayloadShallow(arena, Payload.I64), .int_big_positive, .int_big_negative => { const old_payload = self.cast(Payload.BigInt).?; @@ -567,6 +568,7 @@ pub const Value = extern union { .eu_payload, .opt_payload, .empty_array_sentinel, + .runtime_value, => { const payload = self.cast(Payload.SubValue).?; const new_payload = try arena.create(Payload.SubValue); @@ -765,7 +767,7 @@ pub const Value = extern union { .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .runtime_int => return out_stream.writeAll("[runtime value]"), + .runtime_value => return out_stream.writeAll("[runtime value]"), .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}), .extern_fn => return out_stream.writeAll("(extern function)"), .variable => return out_stream.writeAll("(variable)"), @@ -1081,8 +1083,6 @@ pub const Value = extern union { .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), - .runtime_int => return BigIntMutable.init(&space.limbs, val.castTag(.runtime_int).?.data).toConst(), - .undef => unreachable, .lazy_align => { @@ -1138,8 +1138,6 @@ pub const Value = extern union { .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null, .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null, - .runtime_int => return val.castTag(.runtime_int).?.data, - .undef => unreachable, .lazy_align => { @@ -2357,6 +2355,8 @@ pub const Value = extern union { const zig_ty_tag = ty.zigTypeTag(); std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef()) return; + // The value is runtime-known and shouldn't affect the hash. + if (val.tag() == .runtime_value) return; switch (zig_ty_tag) { .BoundFn => unreachable, // TODO remove this from the language @@ -2632,9 +2632,6 @@ pub const Value = extern union { .lazy_size, => return hashInt(ptr_val, hasher, target), - // The value is runtime-known and shouldn't affect the hash. 
- .runtime_int => {}, - else => unreachable, } } diff --git a/test/behavior/bugs/13164.zig b/test/behavior/bugs/13164.zig index ee9fe1c120..37f5bdf805 100644 --- a/test/behavior/bugs/13164.zig +++ b/test/behavior/bugs/13164.zig @@ -10,6 +10,7 @@ inline fn setLimits(min: ?u32, max: ?u32) !void { test { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO var x: u32 = 42; try setLimits(x, null); diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 80fa2021d8..22e12d0808 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -1135,3 +1135,40 @@ test "array of vectors is copied" { points2[0..points.len].* = points; try std.testing.expectEqual(points2[6], Vec3{ -345, -311, 381 }); } + +test "byte vector initialized in inline function" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + + const S = struct { + inline fn boolx4(e0: bool, e1: bool, e2: bool, e3: bool) @Vector(4, bool) { + return .{ e0, e1, e2, e3 }; + } + + fn all(vb: @Vector(4, bool)) bool { + return @reduce(.And, vb); + } + }; + + try expect(S.all(S.boolx4(true, true, true, true))); +} + +test "byte vector initialized in inline function" { + // TODO https://github.com/ziglang/zig/issues/13279 + if (true) return error.SkipZigTest; + + const S = struct { + fn boolx4(e0: bool, e1: bool, e2: bool, e3: bool) @Vector(4, bool) { + return .{ e0, e1, e2, e3 }; + } + + fn all(vb: @Vector(4, bool)) bool { + return @reduce(.And, vb); + } + }; + + try expect(S.all(S.boolx4(true, true, true, true))); +} -- cgit v1.2.3 From bc72ae5e4e6d8f2253aed1316b053ad1022f9f67 Mon Sep 17 00:00:00 2001 From: InKryption Date: Fri, 16 Sep 2022 18:34:51 +0200 Subject: Sema: Prevent coercion from tuple pointer to mutable slice. Also fix some stdlib code affected by this. Co-authored by: topolarity --- lib/std/fs/wasi.zig | 2 +- lib/std/x/net/bpf.zig | 4 +- src/Sema.zig | 55 +++++++++++++--------- test/behavior/packed-struct.zig | 2 +- .../stage2/tuple_ptr_to_mut_slice.zig | 32 +++++++++++++ 5 files changed, 68 insertions(+), 27 deletions(-) create mode 100644 test/cases/compile_errors/stage2/tuple_ptr_to_mut_slice.zig (limited to 'src') diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index 81a43062dc..522731ef02 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -201,7 +201,7 @@ pub const PreopenList = struct { // If we were provided a CWD root to resolve against, we try to treat Preopen dirs as // POSIX paths, relative to "/" or `cwd_root` depending on whether they start with "." 
const path = if (cwd_root) |cwd| blk: { - const resolve_paths: [][]const u8 = if (raw_path[0] == '.') &.{ cwd, raw_path } else &.{ "/", raw_path }; + const resolve_paths: []const []const u8 = if (raw_path[0] == '.') &.{ cwd, raw_path } else &.{ "/", raw_path }; break :blk fs.path.resolve(self.buffer.allocator, resolve_paths) catch |err| switch (err) { error.CurrentWorkingDirectoryUnlinked => unreachable, // root is absolute, so CWD not queried else => |e| return e, diff --git a/lib/std/x/net/bpf.zig b/lib/std/x/net/bpf.zig index e8db9a3e0e..8fd318b03b 100644 --- a/lib/std/x/net/bpf.zig +++ b/lib/std/x/net/bpf.zig @@ -691,14 +691,14 @@ test "tcpdump filter" { ); } -fn expectPass(data: anytype, filter: []Insn) !void { +fn expectPass(data: anytype, filter: []const Insn) !void { try expectEqual( @as(u32, 0), try simulate(mem.asBytes(data), filter, .Big), ); } -fn expectFail(expected_error: anyerror, data: anytype, filter: []Insn) !void { +fn expectFail(expected_error: anyerror, data: anytype, filter: []const Insn) !void { try expectError( expected_error, simulate(mem.asBytes(data), filter, native_endian), diff --git a/src/Sema.zig b/src/Sema.zig index 931e06724b..608d6d986a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -18208,7 +18208,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.analyzeDeclVal(block, src, new_decl_index); }, .Fn => { - const struct_val = union_val.val.castTag(.aggregate).?.data; + const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // calling_convention: CallingConvention, const cc = struct_val[0].toEnum(std.builtin.CallingConvention); @@ -18242,12 +18242,17 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :alignment alignment; } }; + const return_type = return_type_val.optionalValue() orelse + return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); + var buf: Value.ToTypeBuffer = undefined; const args_slice_val = args_val.castTag(.slice).?.data; const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget())); - var param_types = try sema.arena.alloc(Type, args_len); - var comptime_params = try sema.arena.alloc(bool, args_len); + + const param_types = try sema.arena.alloc(Type, args_len); + const comptime_params = try sema.arena.alloc(bool, args_len); + var noalias_bits: u32 = 0; var i: usize = 0; while (i < args_len) : (i += 1) { @@ -18275,11 +18280,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); param_types[i] = try param_type.toType(&buf).copy(sema.arena); + comptime_params[i] = false; } - const return_type = return_type_val.optionalValue() orelse - return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - var fn_info = Type.Payload.Function.Data{ .param_types = param_types, .comptime_params = comptime_params.ptr, @@ -24075,20 +24078,23 @@ fn coerceExtra( }, else => {}, }, - .Slice => { - // pointer to tuple to slice - if (inst_ty.isSinglePointer() and inst_ty.childType().isTuple() and dest_info.size == .Slice and - sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) - { - return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src); + .Slice => to_slice: { + if (inst_ty.zigTypeTag() == .Array) { + return sema.fail( + block, + inst_src, + "array literal 
requires address-of operator (&) to coerce to slice type '{}'", + .{dest_ty.fmt(sema.mod)}, + ); } + if (!inst_ty.isSinglePointer()) break :to_slice; + const inst_child_ty = inst_ty.childType(); + if (!inst_child_ty.isTuple()) break :to_slice; + // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. - if (inst_ty.isSinglePointer() and - inst_ty.childType().tag() == .empty_struct_literal and - dest_info.size == .Slice) - { + if (inst_child_ty.tupleFields().types.len == 0) { const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = Value.undef, .len = Value.zero, @@ -24096,14 +24102,17 @@ fn coerceExtra( return sema.addConstant(dest_ty, slice_val); } - if (inst_ty.zigTypeTag() == .Array) { - return sema.fail( - block, - inst_src, - "array literal requires address-of operator (&) to coerce to slice type '{}'", - .{dest_ty.fmt(sema.mod)}, - ); + // pointer to tuple to slice + if (dest_info.mutable) { + const err_msg = err_msg: { + const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(sema.mod)}); + errdefer err_msg.deinit(sema.gpa); + try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{}); + break :err_msg err_msg; + }; + return sema.failWithOwnedErrorMsg(err_msg); } + return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src); }, .Many => p: { if (!inst_ty.isSlice()) break :p; diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 5a878112b5..b3ed2af536 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -410,7 +410,7 @@ test "load pointer from packed struct" { y: u32, }; var a: A = .{ .index = 123 }; - var b_list: []B = &.{.{ .x = &a, .y = 99 }}; + var b_list: []const B = &.{.{ .x = &a, .y = 99 }}; for (b_list) |b| { var i = b.x.index; try expect(i == 123); diff --git a/test/cases/compile_errors/stage2/tuple_ptr_to_mut_slice.zig b/test/cases/compile_errors/stage2/tuple_ptr_to_mut_slice.zig new file mode 100644 index 0000000000..24e4753563 --- /dev/null +++ b/test/cases/compile_errors/stage2/tuple_ptr_to_mut_slice.zig @@ -0,0 +1,32 @@ +export fn entry1() void { + var a = .{ 1, 2, 3 }; + _ = @as([]u8, &a); +} +export fn entry2() void { + var a = .{ @as(u8, 1), @as(u8, 2), @as(u8, 3) }; + _ = @as([]u8, &a); +} + +// runtime values +var vals = [_]u7{ 4, 5, 6 }; +export fn entry3() void { + var a = .{ vals[0], vals[1], vals[2] }; + _ = @as([]u8, &a); +} +export fn entry4() void { + var a = .{ @as(u8, vals[0]), @as(u8, vals[1]), @as(u8, vals[2]) }; + _ = @as([]u8, &a); +} + +// error +// backend=stage2 +// target=native +// +// :3:19: error: cannot cast pointer to tuple to '[]u8' +// :3:19: note: pointers to tuples can only coerce to constant pointers +// :7:19: error: cannot cast pointer to tuple to '[]u8' +// :7:19: note: pointers to tuples can only coerce to constant pointers +// :14:19: error: cannot cast pointer to tuple to '[]u8' +// :14:19: note: pointers to tuples can only coerce to constant pointers +// :18:19: error: cannot cast pointer to tuple to '[]u8' +// :18:19: note: pointers to tuples can only coerce to constant pointers -- cgit v1.2.3 From c3b85e4e2f3da02d78ccb814e3025ee4b78d541b Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Thu, 27 Oct 2022 19:31:45 +0300 Subject: Sema: further enhance explanation of why expr is evaluated at comptime --- src/Sema.zig | 185 +++++++++++++-------- .../condition_comptime_reason_explained.zig | 48 ++++++ 
.../explain_why_fn_is_called_at_comptime.zig | 2 +- ...xplain_why_generic_fn_is_called_at_comptime.zig | 2 +- .../unable_to_evaluate_expr_inside_cimport.zig | 15 ++ test/compile_errors.zig | 2 +- 6 files changed, 181 insertions(+), 73 deletions(-) create mode 100644 test/cases/compile_errors/condition_comptime_reason_explained.zig create mode 100644 test/cases/compile_errors/unable_to_evaluate_expr_inside_cimport.zig (limited to 'src') diff --git a/src/Sema.zig b/src/Sema.zig index 9f425b7bcf..d31c0745ed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -151,6 +151,8 @@ pub const Block = struct { runtime_index: Value.RuntimeIndex = .zero, inline_block: Zir.Inst.Index = 0, + comptime_reason: ?*const ComptimeReason = null, + // TODO is_comptime and comptime_reason should probably be merged together. is_comptime: bool, is_typeof: bool = false, is_coerce_result_ptr: bool = false, @@ -173,6 +175,49 @@ pub const Block = struct { /// Value for switch_capture in an inline case inline_case_capture: Air.Inst.Ref = .none, + const ComptimeReason = union(enum) { + c_import: struct { + block: *Block, + src: LazySrcLoc, + }, + comptime_ret_ty: struct { + block: *Block, + func: Air.Inst.Ref, + func_src: LazySrcLoc, + return_ty: Type, + }, + + fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void { + const parent = msg orelse return; + const prefix = "expression is evaluated at comptime because "; + switch (cr) { + .c_import => |ci| { + try sema.errNote(ci.block, ci.src, parent, prefix ++ "it is inside a @cImport", .{}); + }, + .comptime_ret_ty => |rt| { + const src_loc = if (try sema.funcDeclSrc(rt.block, rt.func_src, rt.func)) |capture| blk: { + var src_loc = capture; + src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; + break :blk src_loc; + } else blk: { + const src_decl = sema.mod.declPtr(rt.block.src_decl); + break :blk rt.func_src.toSrcLoc(src_decl); + }; + if (rt.return_ty.tag() == .generic_poison) { + return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); + } + try sema.mod.errNoteNonLazy( + src_loc, + parent, + prefix ++ "the function returns a comptime-only type '{}'", + .{rt.return_ty.fmt(sema.mod)}, + ); + try sema.explainWhyTypeIsComptime(rt.block, rt.func_src, parent, src_loc, rt.return_ty); + }, + } + } + }; + const Param = struct { /// `noreturn` means `anytype`. 
ty: Type, @@ -224,6 +269,7 @@ pub const Block = struct { .label = null, .inlining = parent.inlining, .is_comptime = parent.is_comptime, + .comptime_reason = parent.comptime_reason, .is_typeof = parent.is_typeof, .runtime_cond = parent.runtime_cond, .runtime_loop = parent.runtime_loop, @@ -1420,7 +1466,10 @@ fn analyzeBodyInner( const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known"); + const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| { + if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); + return err; + }; const inline_body = if (cond.val.toBool()) then_body else else_body; try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src); @@ -1438,7 +1487,10 @@ fn analyzeBodyInner( const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known"); + const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| { + if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); + return err; + }; const inline_body = if (cond.val.toBool()) then_body else else_body; const old_runtime_index = block.runtime_index; defer block.runtime_index = old_runtime_index; @@ -1460,7 +1512,10 @@ fn analyzeBodyInner( const err_union = try sema.resolveInst(extra.data.operand); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = try sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known"); + const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); + return err; + }; if (is_non_err_tv.val.toBool()) { const err_union_ty = sema.typeOf(err_union); break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); @@ -1516,7 +1571,10 @@ fn analyzeBodyInner( const err_union = try sema.analyzeLoad(block, src, operand, operand_src); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = try sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known"); + const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); + return err; + }; if 
(is_non_err_tv.val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } @@ -1675,8 +1733,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) return; } + assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); - err_trace_block.is_comptime = false; defer err_trace_block.instructions.deinit(sema.gpa); const src: LazySrcLoc = .unneeded; @@ -4944,6 +5002,10 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr var c_import_buf = std.ArrayList(u8).init(sema.gpa); defer c_import_buf.deinit(); + var comptime_reason = .{ .c_import = .{ + .block = parent_block, + .src = src, + } }; var child_block: Block = .{ .parent = parent_block, .sema = sema, @@ -4952,7 +5014,8 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, - .is_comptime = parent_block.is_comptime, + .is_comptime = true, + .comptime_reason = &comptime_reason, .c_import_buf = &c_import_buf, .runtime_cond = parent_block.runtime_cond, .runtime_loop = parent_block.runtime_loop, @@ -5053,6 +5116,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErro .label = &label, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, + .comptime_reason = parent_block.comptime_reason, .is_typeof = parent_block.is_typeof, .want_safety = parent_block.want_safety, .float_mode = parent_block.float_mode, @@ -5926,6 +5990,7 @@ fn zirCall( defer block.is_comptime = parent_comptime; if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { block.is_comptime = true; + // TODO set comptime_reason } const param_ty_inst = try sema.addType(param_ty); @@ -6056,37 +6121,6 @@ const GenericCallAdapter = struct { } }; -fn addComptimeReturnTypeNote( - sema: *Sema, - block: *Block, - func: Air.Inst.Ref, - func_src: LazySrcLoc, - return_ty: Type, - parent: *Module.ErrorMsg, - requires_comptime: bool, -) !void { - if (!requires_comptime) return; - - const src_loc = if (try sema.funcDeclSrc(block, func_src, func)) |capture| blk: { - var src_loc = capture; - src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; - break :blk src_loc; - } else blk: { - const src_decl = sema.mod.declPtr(block.src_decl); - break :blk func_src.toSrcLoc(src_decl); - }; - if (return_ty.tag() == .generic_poison) { - return sema.mod.errNoteNonLazy(src_loc, parent, "generic function is instantiated with a comptime-only return type", .{}); - } - try sema.mod.errNoteNonLazy( - src_loc, - parent, - "function is being called at comptime because it returns a comptime-only type '{}'", - .{return_ty.fmt(sema.mod)}, - ); - try sema.explainWhyTypeIsComptime(block, func_src, parent, src_loc, return_ty); -} - fn analyzeCall( sema: *Sema, block: *Block, @@ -6177,11 +6211,21 @@ fn analyzeCall( var is_generic_call = func_ty_info.is_generic; var is_comptime_call = block.is_comptime or modifier == .compile_time; - var comptime_only_ret_ty = false; + var comptime_reason_buf: Block.ComptimeReason = undefined; + var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_comptime_call) { if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| { is_comptime_call = ct; - comptime_only_ret_ty = ct; + if (ct) { + // stage1 can't handle doing this directly + comptime_reason_buf = .{ .comptime_ret_ty = .{ + .block = block, + .func = func, + .func_src = func_src, + .return_ty = 
func_ty_info.return_type, + } }; + comptime_reason = &comptime_reason_buf; + } } else |err| switch (err) { error.GenericPoison => is_generic_call = true, else => |e| return e, @@ -6210,7 +6254,14 @@ fn analyzeCall( error.ComptimeReturn => { is_inline_call = true; is_comptime_call = true; - comptime_only_ret_ty = true; + // stage1 can't handle doing this directly + comptime_reason_buf = .{ .comptime_ret_ty = .{ + .block = block, + .func = func, + .func_src = func_src, + .return_ty = func_ty_info.return_type, + } }; + comptime_reason = &comptime_reason_buf; }, else => |e| return e, } @@ -6222,9 +6273,7 @@ fn analyzeCall( const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = sema.resolveConstValue(block, func_src, func, "function being called at comptime must be comptime-known") catch |err| { - if (err == error.AnalysisFail and sema.err != null) { - try sema.addComptimeReturnTypeNote(block, func, func_src, func_ty_info.return_type, sema.err.?, comptime_only_ret_ty); - } + if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; const module_fn = switch (func_val.tag()) { @@ -6292,6 +6341,7 @@ fn analyzeCall( .label = null, .inlining = &inlining, .is_comptime = is_comptime_call, + .comptime_reason = comptime_reason, .error_return_trace_index = block.error_return_trace_index, }; @@ -6344,11 +6394,6 @@ fn analyzeCall( is_comptime_call, &should_memoize, memoized_call_key, - // last 4 arguments are only used when reporting errors - undefined, - undefined, - undefined, - undefined, ) catch |err| switch (err) { error.NeededSourceLocation => { _ = sema.inst_map.remove(inst); @@ -6364,10 +6409,6 @@ fn analyzeCall( is_comptime_call, &should_memoize, memoized_call_key, - func, - func_src, - func_ty_info.return_type, - comptime_only_ret_ty, ); return error.AnalysisFail; }, @@ -6604,10 +6645,6 @@ fn analyzeInlineCallArg( is_comptime_call: bool, should_memoize: *bool, memoized_call_key: Module.MemoizedCall.Key, - func: Air.Inst.Ref, - func_src: LazySrcLoc, - ret_ty: Type, - comptime_only_ret_ty: bool, ) !void { const zir_tags = sema.code.instructions.items(.tag); switch (zir_tags[inst]) { @@ -6624,9 +6661,7 @@ fn analyzeInlineCallArg( const uncasted_arg = uncasted_args[arg_i.*]; if (try sema.typeRequiresComptime(param_ty)) { _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| { - if (err == error.AnalysisFail and sema.err != null) { - try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty); - } + if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; } @@ -6635,9 +6670,7 @@ fn analyzeInlineCallArg( if (is_comptime_call) { try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg); const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| { - if (err == error.AnalysisFail and sema.err != null) { - try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty); - } + if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; switch (arg_val.tag()) { @@ -6679,9 +6712,7 @@ fn analyzeInlineCallArg( if (is_comptime_call) { try sema.inst_map.putNoClobber(sema.gpa, inst, 
uncasted_arg); const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| { - if (err == error.AnalysisFail and sema.err != null) { - try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty); - } + if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; switch (arg_val.tag()) { @@ -10223,6 +10254,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .label = &label, .inlining = block.inlining, .is_comptime = block.is_comptime, + .comptime_reason = block.comptime_reason, .is_typeof = block.is_typeof, .switch_else_err_ty = else_error_ty, .runtime_cond = block.runtime_cond, @@ -10333,7 +10365,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.resolveBlockBody(block, src, &child_block, special.body, inst, merges); } - try sema.requireRuntimeBlock(block, src, operand_src); + if (child_block.is_comptime) { + _ = sema.resolveConstValue(&child_block, operand_src, operand, "condition in comptime switch must be comptime-known") catch |err| { + if (err == error.AnalysisFail and child_block.comptime_reason != null) try child_block.comptime_reason.?.explain(sema, sema.err); + return err; + }; + unreachable; + } const estimated_cases_extra = (scalar_cases_len + multi_cases_len) * @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2; @@ -21469,6 +21507,9 @@ fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: if (runtime_src) |some| { try sema.errNote(block, some, msg, "operation is runtime due to this operand", .{}); } + if (block.comptime_reason) |some| { + try some.explain(sema, msg); + } break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -21940,6 +21981,7 @@ fn addSafetyCheck( panic_id: PanicId, ) !void { const gpa = sema.gpa; + assert(!parent_block.is_comptime); var fail_block: Block = .{ .parent = parent_block, @@ -21949,7 +21991,7 @@ fn addSafetyCheck( .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, - .is_comptime = parent_block.is_comptime, + .is_comptime = false, }; defer fail_block.instructions.deinit(gpa); @@ -22061,6 +22103,7 @@ fn panicUnwrapError( unwrap_err_tag: Air.Inst.Tag, is_non_err_tag: Air.Inst.Tag, ) !void { + assert(!parent_block.is_comptime); const ok = try parent_block.addUnOp(is_non_err_tag, operand); const gpa = sema.gpa; @@ -22072,7 +22115,7 @@ fn panicUnwrapError( .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, - .is_comptime = parent_block.is_comptime, + .is_comptime = false, }; defer fail_block.instructions.deinit(gpa); @@ -22104,6 +22147,7 @@ fn panicIndexOutOfBounds( len: Air.Inst.Ref, cmp_op: Air.Inst.Tag, ) !void { + assert(!parent_block.is_comptime); const ok = try parent_block.addBinOp(cmp_op, index, len); const gpa = sema.gpa; @@ -22115,7 +22159,7 @@ fn panicIndexOutOfBounds( .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, - .is_comptime = parent_block.is_comptime, + .is_comptime = false, }; defer fail_block.instructions.deinit(gpa); @@ -22146,6 +22190,7 @@ fn panicSentinelMismatch( ptr: Air.Inst.Ref, sentinel_index: Air.Inst.Ref, ) !void { + assert(!parent_block.is_comptime); const expected_sentinel_val = maybe_sentinel orelse return; 
const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); @@ -22186,7 +22231,7 @@ fn panicSentinelMismatch( .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, - .is_comptime = parent_block.is_comptime, + .is_comptime = false, }; defer fail_block.instructions.deinit(gpa); diff --git a/test/cases/compile_errors/condition_comptime_reason_explained.zig b/test/cases/compile_errors/condition_comptime_reason_explained.zig new file mode 100644 index 0000000000..332ae8afc8 --- /dev/null +++ b/test/cases/compile_errors/condition_comptime_reason_explained.zig @@ -0,0 +1,48 @@ +const S = struct { + fnPtr: fn () void, +}; +fn bar() void {} +fn baz() void {} +var runtime: bool = true; +fn ifExpr() S { + if (runtime) { + return .{ + .fnPtr = bar, + }; + } else { + return .{ + .fnPtr = baz, + }; + } +} +pub export fn entry1() void { + _ = ifExpr(); +} +fn switchExpr() S { + switch (runtime) { + true => return .{ + .fnPtr = bar, + }, + false => return .{ + .fnPtr = baz, + }, + } +} +pub export fn entry2() void { + _ = switchExpr(); +} + +// error +// backend=stage2 +// target=native +// +// :8:9: error: unable to resolve comptime value +// :8:9: note: condition in comptime branch must be comptime-known +// :7:13: note: expression is evaluated at comptime because the function returns a comptime-only type 'tmp.S' +// :2:12: note: struct requires comptime because of this field +// :2:12: note: use '*const fn() void' for a function pointer type +// :19:15: note: called from here +// :22:13: error: unable to resolve comptime value +// :22:13: note: condition in comptime switch must be comptime-known +// :21:17: note: expression is evaluated at comptime because the function returns a comptime-only type 'tmp.S' +// :32:19: note: called from here diff --git a/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig b/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig index d19ab31617..83d5d7e33f 100644 --- a/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig +++ b/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig @@ -18,6 +18,6 @@ pub export fn entry() void { // // :12:13: error: unable to resolve comptime value // :12:13: note: argument to function being called at comptime must be comptime-known -// :7:25: note: function is being called at comptime because it returns a comptime-only type 'tmp.S' +// :7:25: note: expression is evaluated at comptime because the function returns a comptime-only type 'tmp.S' // :2:12: note: struct requires comptime because of this field // :2:12: note: use '*const fn() void' for a function pointer type diff --git a/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig b/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig index 36aeb40479..701241a403 100644 --- a/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig +++ b/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig @@ -19,4 +19,4 @@ pub export fn entry() void { // // :14:13: error: unable to resolve comptime value // :14:13: note: argument to function being called at comptime must be comptime-known -// :9:38: note: generic function is instantiated with a comptime-only return type +// :9:38: note: expression is evaluated at comptime because the generic function was instantiated with a comptime-only return type diff --git a/test/cases/compile_errors/unable_to_evaluate_expr_inside_cimport.zig 
b/test/cases/compile_errors/unable_to_evaluate_expr_inside_cimport.zig new file mode 100644 index 0000000000..9460a58993 --- /dev/null +++ b/test/cases/compile_errors/unable_to_evaluate_expr_inside_cimport.zig @@ -0,0 +1,15 @@ +const c = @cImport({ + _ = 1 + foo; +}); +extern var foo: i32; +export fn entry() void { + _ = c; +} + +// error +// backend=llvm +// target=native +// +// :2:11: error: unable to evaluate comptime expression +// :2:13: note: operation is runtime due to this operand +// :1:11: note: expression is evaluated at comptime because it is inside a @cImport diff --git a/test/compile_errors.zig b/test/compile_errors.zig index e9b08565ce..5454124df8 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -204,7 +204,7 @@ pub fn addCases(ctx: *TestContext) !void { , &[_][]const u8{ ":3:12: error: unable to resolve comptime value", ":3:12: note: argument to function being called at comptime must be comptime-known", - ":2:55: note: generic function is instantiated with a comptime-only return type", + ":2:55: note: expression is evaluated at comptime because the generic function was instantiated with a comptime-only return type", }); } -- cgit v1.2.3 From 6fc71835c3075aff4792b63bc38698cbe542f028 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Fri, 28 Oct 2022 00:07:38 +0300 Subject: value: properly hash `null_value` pointer Closes #13325 --- src/value.zig | 1 + test/behavior/generics.zig | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'src') diff --git a/src/value.zig b/src/value.zig index 28601c1723..7d01430103 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2621,6 +2621,7 @@ pub const Value = extern union { .zero, .one, + .null_value, .int_u64, .int_i64, .int_big_positive, diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 4a483ec0d5..9513118c19 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -396,3 +396,12 @@ test "slice as parameter type" { try expect(S.internComptimeString(source_a[1..2]) == S.internComptimeString(source_a[1..2])); try expect(S.internComptimeString(source_a[2..4]) != S.internComptimeString(source_a[5..7])); } + +test "null sentinel pointer passed as generic argument" { + const S = struct { + fn doTheTest(a: anytype) !void { + try std.testing.expect(@ptrToInt(a) == 8); + } + }; + try S.doTheTest((@intToPtr([*:null]const [*c]const u8, 8))); +} -- cgit v1.2.3 From 3295fee9116789f144e6406493116c451aee7c57 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Tue, 18 Oct 2022 11:37:43 -0700 Subject: stage2: Use mem.readPackedInt etc. for packed bitcasts Packed memory has a well-defined layout that doesn't require conversion from an integer to read from. Let's use it :-) This change means that for bitcasting to/from a packed value that is N layers deep, we no longer have to create N temporary big-ints and perform N copies. Other miscellaneous improvements: - Adds support for casting to packed enums and vectors - Fixes bitcasting to/from vectors outside of a packed struct - Adds a fast path for bitcasting <= u/i64 - Fixes bug when bitcasting f80 which would clear following fields This also changes the bitcast memory layout of exotic integers on big-endian systems to match what's empirically observed on our targets. Technically, this layout is not guaranteed by LLVM so we should probably ban bitcasts that reveal these padding bits, but for now this is an improvement. 
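To make the packed-memory model concrete, here is a minimal sketch of the std.mem packed-integer helpers that the rewritten big-int routines below delegate to (an illustrative test, not part of this patch):

    const std = @import("std");

    test "read/write a bit-addressed packed field in place" {
        var buf = [_]u8{0} ** 4;
        // Store a 12-bit value at bit offset 4, exactly as a field of a
        // packed struct would be laid out, then load it back directly from
        // the bytes; no intermediate big-int or oversized integer is needed.
        std.mem.writePackedInt(u12, &buf, 4, 0xabc, .Little);
        const v = std.mem.readPackedInt(u12, &buf, 4, .Little);
        try std.testing.expectEqual(@as(u12, 0xabc), v);
    }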
--- lib/std/math/big/int.zig | 161 ++++++-------- lib/std/math/big/int_test.zig | 102 +++++---- src/Sema.zig | 42 ---- src/codegen.zig | 2 +- src/value.zig | 489 +++++++++++++++++++++--------------------- test/behavior/bitcast.zig | 87 +++++++- 6 files changed, 461 insertions(+), 422 deletions(-) (limited to 'src') diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index b875f73b2e..ac2f089ea1 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1762,16 +1762,32 @@ pub const Mutable = struct { } /// Read the value of `x` from `buffer` - /// Asserts that `buffer`, `abi_size`, and `bit_count` are large enough to store the value. + /// Asserts that `buffer` is large enough to contain a value of bit-size `bit_count`. /// /// The contents of `buffer` are interpreted as if they were the contents of - /// @ptrCast(*[abi_size]const u8, &x). Byte ordering is determined by `endian` + /// @ptrCast(*[buffer.len]const u8, &x). Byte ordering is determined by `endian` /// and any required padding bits are expected on the MSB end. pub fn readTwosComplement( x: *Mutable, buffer: []const u8, bit_count: usize, - abi_size: usize, + endian: Endian, + signedness: Signedness, + ) void { + return readPackedTwosComplement(x, buffer, 0, bit_count, endian, signedness); + } + + /// Read the value of `x` from a packed memory `buffer`. + /// Asserts that `buffer` is large enough to contain a value of bit-size `bit_count` + /// at offset `bit_offset`. + /// + /// This is equivalent to loading the value of an integer with `bit_count` bits as + /// if it were a field in packed memory at the provided bit offset. + pub fn readPackedTwosComplement( + x: *Mutable, + bytes: []const u8, + bit_offset: usize, + bit_count: usize, endian: Endian, signedness: Signedness, ) void { @@ -1782,75 +1798,54 @@ pub const Mutable = struct { return; } - // byte_count is our total read size: it cannot exceed abi_size, - // but may be less as long as it includes the required bits - const limb_count = calcTwosCompLimbCount(bit_count); - const byte_count = std.math.min(abi_size, @sizeOf(Limb) * limb_count); - assert(8 * byte_count >= bit_count); - // Check whether the input is negative var positive = true; if (signedness == .signed) { + const total_bits = bit_offset + bit_count; var last_byte = switch (endian) { - .Little => ((bit_count + 7) / 8) - 1, - .Big => abi_size - ((bit_count + 7) / 8), + .Little => ((total_bits + 7) / 8) - 1, + .Big => bytes.len - ((total_bits + 7) / 8), }; - const sign_bit = @as(u8, 1) << @intCast(u3, (bit_count - 1) % 8); - positive = ((buffer[last_byte] & sign_bit) == 0); + const sign_bit = @as(u8, 1) << @intCast(u3, (total_bits - 1) % 8); + positive = ((bytes[last_byte] & sign_bit) == 0); } // Copy all complete limbs - var carry: u1 = if (positive) 0 else 1; + var carry: u1 = 1; var limb_index: usize = 0; + var bit_index: usize = 0; while (limb_index < bit_count / @bitSizeOf(Limb)) : (limb_index += 1) { - var buf_index = switch (endian) { - .Little => @sizeOf(Limb) * limb_index, - .Big => abi_size - (limb_index + 1) * @sizeOf(Limb), - }; - - const limb_buf = @ptrCast(*const [@sizeOf(Limb)]u8, buffer[buf_index..]); - var limb = mem.readInt(Limb, limb_buf, endian); + // Read one Limb of bits + var limb = mem.readPackedInt(Limb, bytes, bit_index + bit_offset, endian); + bit_index += @bitSizeOf(Limb); // 2's complement (bitwise not, then add carry bit) if (!positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb)); x.limbs[limb_index] = limb; } - // Copy the remaining N 
bytes (N <= @sizeOf(Limb)) - var bytes_read = limb_index * @sizeOf(Limb); - if (bytes_read != byte_count) { - var limb: Limb = 0; - - while (bytes_read != byte_count) { - const read_size = std.math.floorPowerOfTwo(usize, byte_count - bytes_read); - var int_buffer = switch (endian) { - .Little => buffer[bytes_read..], - .Big => buffer[(abi_size - bytes_read - read_size)..], - }; - limb |= @intCast(Limb, switch (read_size) { - 1 => mem.readInt(u8, int_buffer[0..1], endian), - 2 => mem.readInt(u16, int_buffer[0..2], endian), - 4 => mem.readInt(u32, int_buffer[0..4], endian), - 8 => mem.readInt(u64, int_buffer[0..8], endian), - 16 => mem.readInt(u128, int_buffer[0..16], endian), - else => unreachable, - }) << @intCast(Log2Limb, 8 * (bytes_read % @sizeOf(Limb))); - bytes_read += read_size; - } + // Copy the remaining bits + if (bit_count != bit_index) { + // Read all remaining bits + var limb = switch (signedness) { + .unsigned => mem.readVarPackedInt(Limb, bytes, bit_index + bit_offset, bit_count - bit_index, endian, .unsigned), + .signed => b: { + const SLimb = std.meta.Int(.signed, @bitSizeOf(Limb)); + const limb = mem.readVarPackedInt(SLimb, bytes, bit_index + bit_offset, bit_count - bit_index, endian, .signed); + break :b @bitCast(Limb, limb); + }, + }; // 2's complement (bitwise not, then add carry bit) - if (!positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb); - - // Mask off any unused bits - const valid_bits = @intCast(Log2Limb, bit_count % @bitSizeOf(Limb)); - const mask = (@as(Limb, 1) << valid_bits) -% 1; // 0b0..01..1 with (valid_bits_in_limb) trailing ones - limb &= mask; + if (!positive) assert(!@addWithOverflow(Limb, ~limb, carry, &limb)); + x.limbs[limb_index] = limb; - x.limbs[limb_count - 1] = limb; + limb_index += 1; } + x.positive = positive; - x.len = limb_count; + x.len = limb_index; x.normalize(x.len); } @@ -2212,66 +2207,48 @@ pub const Const = struct { } /// Write the value of `x` into `buffer` - /// Asserts that `buffer`, `abi_size`, and `bit_count` are large enough to store the value. + /// Asserts that `buffer` is large enough to store the value. /// /// `buffer` is filled so that its contents match what would be observed via - /// @ptrCast(*[abi_size]const u8, &x). Byte ordering is determined by `endian`, + /// @ptrCast(*[buffer.len]const u8, &x). Byte ordering is determined by `endian`, /// and any required padding bits are added on the MSB end. - pub fn writeTwosComplement(x: Const, buffer: []u8, bit_count: usize, abi_size: usize, endian: Endian) void { + pub fn writeTwosComplement(x: Const, buffer: []u8, endian: Endian) void { + return writePackedTwosComplement(x, buffer, 0, 8 * buffer.len, endian); + } - // byte_count is our total write size - const byte_count = abi_size; - assert(8 * byte_count >= bit_count); - assert(buffer.len >= byte_count); + /// Write the value of `x` to a packed memory `buffer`. + /// Asserts that `buffer` is large enough to contain a value of bit-size `bit_count` + /// at offset `bit_offset`. + /// + /// This is equivalent to storing the value of an integer with `bit_count` bits as + /// if it were a field in packed memory at the provided bit offset. 
+ pub fn writePackedTwosComplement(x: Const, bytes: []u8, bit_offset: usize, bit_count: usize, endian: Endian) void { assert(x.fitsInTwosComp(if (x.positive) .unsigned else .signed, bit_count)); // Copy all complete limbs - var carry: u1 = if (x.positive) 0 else 1; + var carry: u1 = 1; var limb_index: usize = 0; - while (limb_index < byte_count / @sizeOf(Limb)) : (limb_index += 1) { - var buf_index = switch (endian) { - .Little => @sizeOf(Limb) * limb_index, - .Big => abi_size - (limb_index + 1) * @sizeOf(Limb), - }; - + var bit_index: usize = 0; + while (limb_index < bit_count / @bitSizeOf(Limb)) : (limb_index += 1) { var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0; + // 2's complement (bitwise not, then add carry bit) if (!x.positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb)); - var limb_buf = @ptrCast(*[@sizeOf(Limb)]u8, buffer[buf_index..]); - mem.writeInt(Limb, limb_buf, limb, endian); + // Write one Limb of bits + mem.writePackedInt(Limb, bytes, bit_index + bit_offset, limb, endian); + bit_index += @bitSizeOf(Limb); } - // Copy the remaining N bytes (N < @sizeOf(Limb)) - var bytes_written = limb_index * @sizeOf(Limb); - if (bytes_written != byte_count) { + // Copy the remaining bits + if (bit_count != bit_index) { var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0; + // 2's complement (bitwise not, then add carry bit) if (!x.positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb); - while (bytes_written != byte_count) { - const write_size = std.math.floorPowerOfTwo(usize, byte_count - bytes_written); - var int_buffer = switch (endian) { - .Little => buffer[bytes_written..], - .Big => buffer[(abi_size - bytes_written - write_size)..], - }; - - if (write_size == 1) { - mem.writeInt(u8, int_buffer[0..1], @truncate(u8, limb), endian); - } else if (@sizeOf(Limb) >= 2 and write_size == 2) { - mem.writeInt(u16, int_buffer[0..2], @truncate(u16, limb), endian); - } else if (@sizeOf(Limb) >= 4 and write_size == 4) { - mem.writeInt(u32, int_buffer[0..4], @truncate(u32, limb), endian); - } else if (@sizeOf(Limb) >= 8 and write_size == 8) { - mem.writeInt(u64, int_buffer[0..8], @truncate(u64, limb), endian); - } else if (@sizeOf(Limb) >= 16 and write_size == 16) { - mem.writeInt(u128, int_buffer[0..16], @truncate(u128, limb), endian); - } else if (@sizeOf(Limb) >= 32) { - @compileError("@sizeOf(Limb) exceeded supported range"); - } else unreachable; - limb >>= @intCast(Log2Limb, 8 * write_size); - bytes_written += write_size; - } + // Write all remaining bits + mem.writeVarPackedInt(bytes, bit_index + bit_offset, bit_count - bit_index, limb, endian); } } diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 5685a38d41..97de06bfcc 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -2603,13 +2603,13 @@ test "big int conversion read/write twos complement" { for (endians) |endian| { // Writing to buffer and back should not change anything - a.toConst().writeTwosComplement(buffer1, 493, abi_size, endian); - m.readTwosComplement(buffer1, 493, abi_size, endian, .unsigned); + a.toConst().writeTwosComplement(buffer1[0..abi_size], endian); + m.readTwosComplement(buffer1[0..abi_size], 493, endian, .unsigned); try testing.expect(m.toConst().order(a.toConst()) == .eq); // Equivalent to @bitCast(i493, @as(u493, intMax(u493)) - a.toConst().writeTwosComplement(buffer1, 493, abi_size, endian); - m.readTwosComplement(buffer1, 493, abi_size, endian, .signed); + 
a.toConst().writeTwosComplement(buffer1[0..abi_size], endian); + m.readTwosComplement(buffer1[0..abi_size], 493, endian, .signed); try testing.expect(m.toConst().orderAgainstScalar(-1) == .eq); } } @@ -2628,26 +2628,26 @@ test "big int conversion read twos complement with padding" { // (3) should sign-extend any bits from bit_count to 8 * abi_size var bit_count: usize = 12 * 8 + 1; - a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little); + a.toConst().writeTwosComplement(buffer1[0..13], .Little); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0xaa, 0xaa, 0xaa })); - a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big); + a.toConst().writeTwosComplement(buffer1[0..13], .Big); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xaa, 0xaa, 0xaa })); - a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little); + a.toConst().writeTwosComplement(buffer1[0..16], .Little); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0, 0x0, 0x0 })); - a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big); + a.toConst().writeTwosComplement(buffer1[0..16], .Big); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd })); @memset(buffer1.ptr, 0xaa, buffer1.len); try a.set(-0x01_02030405_06070809_0a0b0c0d); bit_count = 12 * 8 + 2; - a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little); + a.toConst().writeTwosComplement(buffer1[0..13], .Little); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xaa, 0xaa, 0xaa })); - a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big); + a.toConst().writeTwosComplement(buffer1[0..13], .Big); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3, 0xaa, 0xaa, 0xaa })); - a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little); + a.toConst().writeTwosComplement(buffer1[0..16], .Little); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff })); - a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big); + a.toConst().writeTwosComplement(buffer1[0..16], .Big); try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 })); } @@ -2660,17 +2660,15 @@ test "big int write twos complement +/- zero" { defer testing.allocator.free(buffer1); @memset(buffer1.ptr, 0xaa, buffer1.len); - var bit_count: usize = 0; - // Test zero - m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little); + m.toConst().writeTwosComplement(buffer1[0..13], .Little); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); - m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big); + m.toConst().writeTwosComplement(buffer1[0..13], .Big); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); - m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little); + m.toConst().writeTwosComplement(buffer1[0..16], .Little); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); - m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big); + 
m.toConst().writeTwosComplement(buffer1[0..16], .Big); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); @memset(buffer1.ptr, 0xaa, buffer1.len); @@ -2678,13 +2676,13 @@ test "big int write twos complement +/- zero" { // Test negative zero - m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little); + m.toConst().writeTwosComplement(buffer1[0..13], .Little); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); - m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big); + m.toConst().writeTwosComplement(buffer1[0..13], .Big); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); - m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little); + m.toConst().writeTwosComplement(buffer1[0..16], .Little); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); - m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big); + m.toConst().writeTwosComplement(buffer1[0..16], .Big); try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); } @@ -2705,62 +2703,82 @@ test "big int conversion write twos complement with padding" { // Test 0x01_02030405_06070809_0a0b0c0d buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xb }; - m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq); buffer = &[_]u8{ 0xb, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; - m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq); buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xab, 0xaa, 0xaa, 0xaa }; - m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq); buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xab, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; - m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq); + bit_count = @sizeOf(Limb) * 8; + + // Test 0x0a0a0a0a_02030405_06070809_0a0b0c0d + + buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa }; + m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); + try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq); + + buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; + m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); + try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq); + + buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa }; + m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); + try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq); + + buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; + m.readTwosComplement(buffer[0..16], bit_count, .Big, 
.unsigned); + try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq); + bit_count = 12 * 8 + 2; // Test -0x01_02030405_06070809_0a0b0c0d buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02 }; - m.readTwosComplement(buffer, bit_count, 13, .Little, .signed); + m.readTwosComplement(buffer[0..13], bit_count, .Little, .signed); try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq); buffer = &[_]u8{ 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }; - m.readTwosComplement(buffer, bit_count, 13, .Big, .signed); + m.readTwosComplement(buffer[0..13], bit_count, .Big, .signed); try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq); buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02, 0xaa, 0xaa, 0xaa }; - m.readTwosComplement(buffer, bit_count, 16, .Little, .signed); + m.readTwosComplement(buffer[0..16], bit_count, .Little, .signed); try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq); buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }; - m.readTwosComplement(buffer, bit_count, 16, .Big, .signed); + m.readTwosComplement(buffer[0..16], bit_count, .Big, .signed); try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq); // Test 0 buffer = &([_]u8{0} ** 16); - m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); bit_count = 0; buffer = &([_]u8{0xaa} ** 16); - m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); } @@ -2779,15 +2797,15 @@ test "big int conversion write twos complement zero" { var buffer: []const u8 = undefined; buffer = &([_]u8{0} ** 13); - m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - 
m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned); + m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); buffer = &([_]u8{0} ** 16); - m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); - m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned); + m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned); try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq); } diff --git a/src/Sema.zig b/src/Sema.zig index 9f425b7bcf..fbc8d1dd8e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -26445,48 +26445,6 @@ fn bitCastVal( const target = sema.mod.getTarget(); if (old_ty.eql(new_ty, sema.mod)) return val; - // Some conversions have a bitwise definition that ignores in-memory layout, - // such as converting between f80 and u80. - - if (old_ty.eql(Type.f80, sema.mod) and new_ty.isAbiInt()) { - const float = val.toFloat(f80); - switch (new_ty.intInfo(target).signedness) { - .signed => { - const int = @bitCast(i80, float); - const limbs = try sema.arena.alloc(std.math.big.Limb, 2); - const big_int = std.math.big.int.Mutable.init(limbs, int); - return Value.fromBigInt(sema.arena, big_int.toConst()); - }, - .unsigned => { - const int = @bitCast(u80, float); - const limbs = try sema.arena.alloc(std.math.big.Limb, 2); - const big_int = std.math.big.int.Mutable.init(limbs, int); - return Value.fromBigInt(sema.arena, big_int.toConst()); - }, - } - } - - if (new_ty.eql(Type.f80, sema.mod) and old_ty.isAbiInt()) { - var bigint_space: Value.BigIntSpace = undefined; - var bigint = try val.toBigIntAdvanced(&bigint_space, target, sema.kit(block, src)); - switch (old_ty.intInfo(target).signedness) { - .signed => { - // This conversion cannot fail because we already checked bit size before - // calling bitCastVal. - const int = bigint.to(i80) catch unreachable; - const float = @bitCast(f80, int); - return Value.Tag.float_80.create(sema.arena, float); - }, - .unsigned => { - // This conversion cannot fail because we already checked bit size before - // calling bitCastVal. - const int = bigint.to(u80) catch unreachable; - const float = @bitCast(f80, int); - return Value.Tag.float_80.create(sema.arena, float); - }, - } - } - // For types with well-defined memory layouts, we serialize them a byte buffer, // then deserialize to the new type. const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); diff --git a/src/codegen.zig b/src/codegen.zig index 757bd23b38..6acea5a509 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -470,7 +470,7 @@ pub fn generateSymbol( const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; const start = code.items.len; try code.resize(start + abi_size); - bigint.writeTwosComplement(code.items[start..][0..abi_size], info.bits, abi_size, endian); + bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); return Result{ .appended = {} }; } switch (info.signedness) { diff --git a/src/value.zig b/src/value.zig index 28601c1723..7468a69fda 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1206,8 +1206,13 @@ pub const Value = extern union { }; } + /// Write a Value's contents to `buffer`. + /// + /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past + /// the end of the value in memory. 
pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) void { const target = mod.getTarget(); + const endian = target.cpu.arch.endian(); if (val.isUndef()) { const size = @intCast(usize, ty.abiSize(target)); std.mem.set(u8, buffer[0..size], 0xaa); @@ -1218,31 +1223,41 @@ pub const Value = extern union { .Bool => { buffer[0] = @boolToInt(val.toBool()); }, - .Int => { - var bigint_buffer: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buffer, target); - const bits = ty.intInfo(target).bits; - const abi_size = @intCast(usize, ty.abiSize(target)); - bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian()); - }, - .Enum => { + .Int, .Enum => { + const int_info = ty.intInfo(target); + const bits = int_info.bits; + const byte_count = (bits + 7) / 8; + var enum_buffer: Payload.U64 = undefined; const int_val = val.enumToInt(ty, &enum_buffer); - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, target); - const bits = ty.intInfo(target).bits; - const abi_size = @intCast(usize, ty.abiSize(target)); - bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian()); + + if (byte_count <= @sizeOf(u64)) { + const int: u64 = switch (int_val.tag()) { + .zero => 0, + .one => 1, + .int_u64 => int_val.castTag(.int_u64).?.data, + .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data), + else => unreachable, + }; + for (buffer[0..byte_count]) |_, i| switch (endian) { + .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), + .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), + }; + } else { + var bigint_buffer: BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_buffer, target); + bigint.writeTwosComplement(buffer[0..byte_count], endian); + } }, .Float => switch (ty.floatBits(target)) { - 16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer), - 32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer), - 64 => return floatWriteToMemory(f64, val.toFloat(f64), target, buffer), - 80 => return floatWriteToMemory(f80, val.toFloat(f80), target, buffer), - 128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer), + 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16)), endian), + 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32)), endian), + 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64)), endian), + 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80)), endian), + 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128)), endian), else => unreachable, }, - .Array, .Vector => { + .Array => { const len = ty.arrayLen(); const elem_ty = ty.childType(); const elem_size = @intCast(usize, elem_ty.abiSize(target)); @@ -1251,10 +1266,16 @@ pub const Value = extern union { var buf_off: usize = 0; while (elem_i < len) : (elem_i += 1) { const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf); - writeToMemory(elem_val, elem_ty, mod, buffer[buf_off..]); + elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]); buf_off += elem_size; } }, + .Vector => { + // We use byte_count instead of abi_size here, so that any padding bytes + // follow the data bytes, on both big- and little-endian systems. 
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + }, .Struct => switch (ty.containerLayout()) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => { @@ -1266,122 +1287,113 @@ pub const Value = extern union { } }, .Packed => { - // TODO allocate enough heap space instead of using this buffer - // on the stack. - var buf: [16]std.math.big.Limb = undefined; - const host_int = packedStructToInt(val, ty, target, &buf); - const abi_size = @intCast(usize, ty.abiSize(target)); - const bit_size = @intCast(usize, ty.bitSize(target)); - host_int.writeTwosComplement(buffer, bit_size, abi_size, target.cpu.arch.endian()); + const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; - std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), target.cpu.arch.endian()); + std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, else => @panic("TODO implement writeToMemory for more types"), } } - fn packedStructToInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst { - var bigint = BigIntMutable.init(buf, 0); - const fields = ty.structFields().values(); - const field_vals = val.castTag(.aggregate).?.data; - var bits: u16 = 0; - // TODO allocate enough heap space instead of using this buffer - // on the stack. - var field_buf: [16]std.math.big.Limb = undefined; - var field_space: BigIntSpace = undefined; - var field_buf2: [16]std.math.big.Limb = undefined; - for (fields) |field, i| { - const field_val = field_vals[i]; - const field_bigint_const = switch (field.ty.zigTypeTag()) { - .Void => continue, - .Float => floatToBigInt(field_val, field.ty, target, &field_buf), - .Int, .Bool => intOrBoolToBigInt(field_val, field.ty, target, &field_buf, &field_space), - .Struct => switch (field.ty.containerLayout()) { - .Auto, .Extern => unreachable, // Sema should have error'd before this. - .Packed => packedStructToInt(field_val, field.ty, target, &field_buf), - }, - .Vector => vectorToBigInt(field_val, field.ty, target, &field_buf), - .Enum => enumToBigInt(field_val, field.ty, target, &field_space), - .Union => unreachable, // TODO: packed structs support packed unions - else => unreachable, - }; - var field_bigint = BigIntMutable.init(&field_buf2, 0); - field_bigint.shiftLeft(field_bigint_const, bits); - bits += @intCast(u16, field.ty.bitSize(target)); - bigint.bitOr(bigint.toConst(), field_bigint.toConst()); - } - return bigint.toConst(); - } - - fn intOrBoolToBigInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb, space: *BigIntSpace) BigIntConst { - const big_int_const = val.toBigInt(space, target); - if (big_int_const.positive) return big_int_const; - - var big_int = BigIntMutable.init(buf, 0); - big_int.bitNotWrap(big_int_const.negate(), .unsigned, @intCast(u32, ty.bitSize(target))); - big_int.addScalar(big_int.toConst(), 1); - return big_int.toConst(); - } - - fn vectorToBigInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst { + /// Write a Value's contents to `buffer`. + /// + /// Both the start and the end of the provided buffer must be tight, since + /// big-endian packed memory layouts start at the end of the buffer. 
+ pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) void { + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - var vec_bitint = BigIntMutable.init(buf, 0); - const vec_len = @intCast(usize, ty.arrayLen()); - const elem_ty = ty.childType(); - const elem_size = @intCast(usize, elem_ty.bitSize(target)); - - var elem_buf: [16]std.math.big.Limb = undefined; - var elem_space: BigIntSpace = undefined; - var elem_buf2: [16]std.math.big.Limb = undefined; - - var elem_i: usize = 0; - while (elem_i < vec_len) : (elem_i += 1) { - const elem_i_target = if (endian == .Big) vec_len - elem_i - 1 else elem_i; - const elem_val = val.indexVectorlike(elem_i_target); - const elem_bigint_const = switch (elem_ty.zigTypeTag()) { - .Int, .Bool => intOrBoolToBigInt(elem_val, elem_ty, target, &elem_buf, &elem_space), - .Float => floatToBigInt(elem_val, elem_ty, target, &elem_buf), - .Pointer => unreachable, // TODO - else => unreachable, // Sema should not let this happen - }; - var elem_bitint = BigIntMutable.init(&elem_buf2, 0); - elem_bitint.shiftLeft(elem_bigint_const, elem_size * elem_i); - vec_bitint.bitOr(vec_bitint.toConst(), elem_bitint.toConst()); + if (val.isUndef()) { + const bit_size = @intCast(usize, ty.bitSize(target)); + std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); + return; } - return vec_bitint.toConst(); - } + switch (ty.zigTypeTag()) { + .Void => {}, + .Bool => { + const byte_index = switch (endian) { + .Little => bit_offset / 8, + .Big => buffer.len - bit_offset / 8 - 1, + }; + if (val.toBool()) { + buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8)); + } else { + buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8)); + } + }, + .Int, .Enum => { + const bits = ty.intInfo(target).bits; + const abi_size = @intCast(usize, ty.abiSize(target)); - fn enumToBigInt(val: Value, ty: Type, target: Target, space: *BigIntSpace) BigIntConst { - var enum_buf: Payload.U64 = undefined; - const int_val = val.enumToInt(ty, &enum_buf); - return int_val.toBigInt(space, target); - } + var enum_buffer: Payload.U64 = undefined; + const int_val = val.enumToInt(ty, &enum_buffer); - fn floatToBigInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst { - return switch (ty.floatBits(target)) { - 16 => bitcastFloatToBigInt(f16, val.toFloat(f16), buf), - 32 => bitcastFloatToBigInt(f32, val.toFloat(f32), buf), - 64 => bitcastFloatToBigInt(f64, val.toFloat(f64), buf), - 80 => bitcastFloatToBigInt(f80, val.toFloat(f80), buf), - 128 => bitcastFloatToBigInt(f128, val.toFloat(f128), buf), - else => unreachable, - }; - } + if (abi_size <= @sizeOf(u64)) { + const int: u64 = switch (int_val.tag()) { + .zero => 0, + .one => 1, + .int_u64 => int_val.castTag(.int_u64).?.data, + .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data), + else => unreachable, + }; + std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian); + } else { + var bigint_buffer: BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_buffer, target); + bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian); + } + }, + .Float => switch (ty.floatBits(target)) { + 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16)), endian), + 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32)), endian), + 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64)), endian), + 80 => 
std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80)), endian), + 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128)), endian), + else => unreachable, + }, + .Vector => { + const len = ty.arrayLen(); + const elem_ty = ty.childType(); + const elem_bit_size = @intCast(u16, elem_ty.bitSize(target)); - fn bitcastFloatToBigInt(comptime F: type, f: F, buf: []std.math.big.Limb) BigIntConst { - const Int = @Type(.{ .Int = .{ - .signedness = .unsigned, - .bits = @typeInfo(F).Float.bits, - } }); - const int = @bitCast(Int, f); - return BigIntMutable.init(buf, int).toConst(); + var bits: u16 = 0; + var elem_i: usize = 0; + var elem_value_buf: ElemValueBuffer = undefined; + while (elem_i < len) : (elem_i += 1) { + // On big-endian systems, LLVM reverses the element order of vectors by default + const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i; + const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf); + elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits); + bits += elem_bit_size; + } + }, + .Struct => switch (ty.containerLayout()) { + .Auto => unreachable, // Sema is supposed to have emitted a compile error already + .Extern => unreachable, // Handled in non-packed writeToMemory + .Packed => { + var bits: u16 = 0; + const fields = ty.structFields().values(); + const field_vals = val.castTag(.aggregate).?.data; + for (fields) |field, i| { + const field_bits = @intCast(u16, field.ty.bitSize(target)); + field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits); + bits += field_bits; + } + }, + }, + else => @panic("TODO implement writeToPackedMemory for more types"), + } } + /// Load a Value from the contents of `buffer`. + /// + /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past + /// the end of the value in memory. 
pub fn readFromMemory( ty: Type, mod: *Module, @@ -1389,6 +1401,7 @@ pub const Value = extern union { arena: Allocator, ) Allocator.Error!Value { const target = mod.getTarget(); + const endian = target.cpu.arch.endian(); switch (ty.zigTypeTag()) { .Void => return Value.@"void", .Bool => { @@ -1398,27 +1411,40 @@ pub const Value = extern union { return Value.@"true"; } }, - .Int => { - if (buffer.len == 0) return Value.zero; + .Int, .Enum => { const int_info = ty.intInfo(target); - const endian = target.cpu.arch.endian(); - const Limb = std.math.big.Limb; - const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb); - const limbs_buffer = try arena.alloc(Limb, limb_count); - const abi_size = @intCast(usize, ty.abiSize(target)); - var bigint = BigIntMutable.init(limbs_buffer, 0); - bigint.readTwosComplement(buffer, int_info.bits, abi_size, endian, int_info.signedness); - return fromBigInt(arena, bigint.toConst()); + const bits = int_info.bits; + const byte_count = (bits + 7) / 8; + if (bits == 0 or buffer.len == 0) return Value.zero; + + if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 + .signed => { + const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); + return Value.Tag.int_i64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits)); + }, + .unsigned => { + const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); + return Value.Tag.int_u64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits)); + }, + } else { // Slow path, we have to construct a big-int + const Limb = std.math.big.Limb; + const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb); + const limbs_buffer = try arena.alloc(Limb, limb_count); + + var bigint = BigIntMutable.init(limbs_buffer, 0); + bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness); + return fromBigInt(arena, bigint.toConst()); + } }, .Float => switch (ty.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, floatReadFromMemory(f16, target, buffer)), - 32 => return Value.Tag.float_32.create(arena, floatReadFromMemory(f32, target, buffer)), - 64 => return Value.Tag.float_64.create(arena, floatReadFromMemory(f64, target, buffer)), - 80 => return Value.Tag.float_80.create(arena, floatReadFromMemory(f80, target, buffer)), - 128 => return Value.Tag.float_128.create(arena, floatReadFromMemory(f128, target, buffer)), + 16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian))), + 32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian))), + 64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian))), + 80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian))), + 128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian))), else => unreachable, }, - .Array, .Vector => { + .Array => { const elem_ty = ty.childType(); const elem_size = elem_ty.abiSize(target); const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); @@ -1429,6 +1455,12 @@ pub const Value = extern union { } return Tag.aggregate.create(arena, elems); }, + .Vector => { + // We use byte_count instead of abi_size here, so that any padding bytes + // follow the data bytes, on both big- and little-endian systems. 
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); + }, .Struct => switch (ty.containerLayout()) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => { @@ -1436,26 +1468,20 @@ pub const Value = extern union { const field_vals = try arena.alloc(Value, fields.len); for (fields) |field, i| { const off = @intCast(usize, ty.structFieldOffset(i, target)); - field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..], arena); + const sz = @intCast(usize, ty.structFieldType(i).abiSize(target)); + field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); } return Tag.aggregate.create(arena, field_vals); }, .Packed => { - const endian = target.cpu.arch.endian(); - const Limb = std.math.big.Limb; - const abi_size = @intCast(usize, ty.abiSize(target)); - const bit_size = @intCast(usize, ty.bitSize(target)); - const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb); - const limbs_buffer = try arena.alloc(Limb, limb_count); - var bigint = BigIntMutable.init(limbs_buffer, 0); - bigint.readTwosComplement(buffer, bit_size, abi_size, endian, .unsigned); - return intToPackedStruct(ty, target, bigint.toConst(), arena); + const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, }, .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian()); + const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); const payload = try arena.create(Value.Payload.Error); payload.* = .{ @@ -1468,115 +1494,90 @@ pub const Value = extern union { } } - fn intToPackedStruct( + /// Load a Value from the contents of `buffer`. + /// + /// Both the start and the end of the provided buffer must be tight, since + /// big-endian packed memory layouts start at the end of the buffer. 
+ pub fn readFromPackedMemory( ty: Type, - target: Target, - bigint: BigIntConst, + mod: *Module, + buffer: []const u8, + bit_offset: usize, arena: Allocator, ) Allocator.Error!Value { - const limbs_buffer = try arena.alloc(std.math.big.Limb, bigint.limbs.len); - var bigint_mut = bigint.toMutable(limbs_buffer); - const fields = ty.structFields().values(); - const field_vals = try arena.alloc(Value, fields.len); - var bits: u16 = 0; - for (fields) |field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(target)); - bigint_mut.shiftRight(bigint, bits); - bigint_mut.truncate(bigint_mut.toConst(), .unsigned, field_bits); - bits += field_bits; - const field_bigint = bigint_mut.toConst(); - - field_vals[i] = switch (field.ty.zigTypeTag()) { - .Float => switch (field.ty.floatBits(target)) { - 16 => try bitCastBigIntToFloat(f16, .float_16, field_bigint, arena), - 32 => try bitCastBigIntToFloat(f32, .float_32, field_bigint, arena), - 64 => try bitCastBigIntToFloat(f64, .float_64, field_bigint, arena), - 80 => try bitCastBigIntToFloat(f80, .float_80, field_bigint, arena), - 128 => try bitCastBigIntToFloat(f128, .float_128, field_bigint, arena), - else => unreachable, - }, - .Bool => makeBool(!field_bigint.eqZero()), - .Int => try Tag.int_big_positive.create( - arena, - try arena.dupe(std.math.big.Limb, field_bigint.limbs), - ), - .Struct => try intToPackedStruct(field.ty, target, field_bigint, arena), - else => unreachable, - }; - } - return Tag.aggregate.create(arena, field_vals); - } - - fn bitCastBigIntToFloat( - comptime F: type, - comptime float_tag: Tag, - bigint: BigIntConst, - arena: Allocator, - ) !Value { - const Int = @Type(.{ .Int = .{ - .signedness = .unsigned, - .bits = @typeInfo(F).Float.bits, - } }); - const int = bigint.to(Int) catch |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => unreachable, - }; - const f = @bitCast(F, int); - return float_tag.create(arena, f); - } - - fn floatWriteToMemory(comptime F: type, f: F, target: Target, buffer: []u8) void { + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - if (F == f80) { - const repr = std.math.break_f80(f); - std.mem.writeInt(u64, buffer[0..8], repr.fraction, endian); - std.mem.writeInt(u16, buffer[8..10], repr.exp, endian); - std.mem.set(u8, buffer[10..], 0); - return; - } - const Int = @Type(.{ .Int = .{ - .signedness = .unsigned, - .bits = @typeInfo(F).Float.bits, - } }); - const int = @bitCast(Int, f); - std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], int, endian); - } + switch (ty.zigTypeTag()) { + .Void => return Value.@"void", + .Bool => { + const byte = switch (endian) { + .Big => buffer[buffer.len - bit_offset / 8 - 1], + .Little => buffer[bit_offset / 8], + }; + if (((byte >> @intCast(u3, bit_offset % 8)) & 1) == 0) { + return Value.@"false"; + } else { + return Value.@"true"; + } + }, + .Int, .Enum => { + if (buffer.len == 0) return Value.zero; + const int_info = ty.intInfo(target); + const abi_size = @intCast(usize, ty.abiSize(target)); - fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F { - const endian = target.cpu.arch.endian(); - if (F == f80) { - return std.math.make_f80(.{ - .fraction = readInt(u64, buffer[0..8], endian), - .exp = readInt(u16, buffer[8..10], endian), - }); - } - const Int = @Type(.{ .Int = .{ - .signedness = .unsigned, - .bits = @typeInfo(F).Float.bits, - } }); - const int = readInt(Int, buffer[0..@sizeOf(Int)], endian); - return @bitCast(F, int); - } - - fn readInt(comptime Int: type, buffer: 
*const [@sizeOf(Int)]u8, endian: std.builtin.Endian) Int { - var result: Int = 0; - switch (endian) { - .Big => { - for (buffer) |byte| { - result <<= 8; - result |= byte; + const bits = int_info.bits; + if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 + .signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), + .unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), + } else { // Slow path, we have to construct a big-int + const Limb = std.math.big.Limb; + const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); + const limbs_buffer = try arena.alloc(Limb, limb_count); + + var bigint = BigIntMutable.init(limbs_buffer, 0); + bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); + return fromBigInt(arena, bigint.toConst()); } }, - .Little => { - var i: usize = buffer.len; - while (i != 0) { - i -= 1; - result <<= 8; - result |= buffer[i]; + .Float => switch (ty.floatBits(target)) { + 16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian))), + 32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian))), + 64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian))), + 80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian))), + 128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian))), + else => unreachable, + }, + .Vector => { + const elem_ty = ty.childType(); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + + var bits: u16 = 0; + const elem_bit_size = @intCast(u16, elem_ty.bitSize(target)); + for (elems) |_, i| { + // On big-endian systems, LLVM reverses the element order of vectors by default + const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; + elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena); + bits += elem_bit_size; } + return Tag.aggregate.create(arena, elems); }, + .Struct => switch (ty.containerLayout()) { + .Auto => unreachable, // Sema is supposed to have emitted a compile error already + .Extern => unreachable, // Handled by non-packed readFromMemory + .Packed => { + var bits: u16 = 0; + const fields = ty.structFields().values(); + const field_vals = try arena.alloc(Value, fields.len); + for (fields) |field, i| { + const field_bits = @intCast(u16, field.ty.bitSize(target)); + field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena); + bits += field_bits; + } + return Tag.aggregate.create(arena, field_vals); + }, + }, + else => @panic("TODO implement readFromPackedMemory for more types"), } - return result; } /// Asserts that the value is a float or an integer. 
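As a reference point for the packed-memory convention used by `writeToPackedMemory` and `readFromPackedMemory` above, here is a minimal sketch of a round-trip through the `std.mem` packed-int helpers they call into (illustrative only, not part of the patch; the buffer size, bit offset, and values are arbitrary):

    const std = @import("std");

    test "packed int round-trip at a bit offset" {
        var buffer = [_]u8{0} ** 4;
        // Store a 5-bit value at bit offset 3. Bit offsets accumulate from the
        // least-significant bit, mirroring Zig's packed struct field layout.
        std.mem.writePackedInt(u5, &buffer, 3, 0b10110, .Little);
        const v = std.mem.readPackedInt(u5, &buffer, 3, .Little);
        try std.testing.expectEqual(@as(u5, 0b10110), v);
    }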
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index c629a1a34b..b225c31858 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -63,6 +63,10 @@ fn testBitCast(comptime N: usize) !void { try expect(conv_iN(N, 0) == 0); try expect(conv_iN(N, -0) == 0); + + if (N > 24) { + try expect(conv_uN(N, 0xf23456) == 0xf23456); + } } fn conv_iN(comptime N: usize, x: std.meta.Int(.signed, N)) std.meta.Int(.unsigned, N) { @@ -73,6 +77,55 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signed, N) { return @bitCast(std.meta.Int(.signed, N), x); } +test "bitcast uX to bytes" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + + const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 }; + inline for (bit_values) |bits| { + try testBitCastuXToBytes(bits); + comptime try testBitCastuXToBytes(bits); + } +} + +fn testBitCastuXToBytes(comptime N: usize) !void { + + // The location of padding bits in these layouts is technically not defined + // by LLVM, but we currently allow exotic integers to be cast (at comptime) + // to types that expose their padding bits anyway. + // + // This test at least makes sure those bits are matched by the runtime behavior + // on the platforms we target. If the above behavior is restricted after all, + // this test should be deleted. + + const T = std.meta.Int(.unsigned, N); + for ([_]T{ 0, ~@as(T, 0) }) |init_value| { + var x: T = init_value; + const bytes = std.mem.asBytes(&x); + + const byte_count = (N + 7) / 8; + switch (builtin.cpu.arch.endian()) { + .Little => { + var byte_i: usize = 0; + while (byte_i < (byte_count - 1)) : (byte_i += 1) { + try expect(bytes[byte_i] == 0xff); + } + try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0); + }, + .Big => { + var byte_i = byte_count - 1; + while (byte_i > 0) : (byte_i -= 1) { + try expect(bytes[byte_i] == 0xff); + } + try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0); + }, + } + } +} + test "nested bitcast" { const S = struct { fn moo(x: isize) !void { @@ -283,7 +336,8 @@ test "@bitCast packed struct of floats" { comptime try S.doTheTest(); } -test "comptime @bitCast packed struct to int" { +test "comptime @bitCast packed struct to int and back" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -304,6 +358,37 @@ test "comptime @bitCast packed struct to int" { vectorf: @Vector(2, f16) = .{ 3.14, 2.71 }, }; const Int = @typeInfo(S).Struct.backing_integer.?; + + // S -> Int var s: S = .{}; try expectEqual(@bitCast(Int, s), comptime @bitCast(Int, S{})); + + // Int -> S + var i: Int = 0; + const rt_cast = @bitCast(S, i); + const ct_cast = comptime @bitCast(S, @as(Int, 0)); + inline for (@typeInfo(S).Struct.fields) |field| { + if (@typeInfo(field.field_type) == .Vector) + continue; //TODO: https://github.com/ziglang/zig/issues/13201 + + try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name)); + } +} + +test "comptime bitcast with fields following a float" { + if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO: 
https://github.com/ziglang/zig/issues/13214 + + const FloatT = extern struct { f: f80, x: u128 }; + var x: FloatT = .{ .f = 0.5, .x = 123 }; + try expect(@bitCast(u256, x) == comptime @bitCast(u256, @as(FloatT, .{ .f = 0.5, .x = 123 }))); +} + +test "bitcast vector to integer and back" { + if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO: https://github.com/ziglang/zig/issues/13220 + if (builtin.zig_backend == .stage1) return error.SkipZigTest; // stage1 gets the comptime cast wrong + + const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14; + var x = @splat(16, true); + x[1] = false; + try expect(@bitCast(u16, x) == comptime @bitCast(u16, @as(@Vector(16, bool), arr))); } -- cgit v1.2.3 From 9d0a4b60e1c715238f96a64c45619a680f94c300 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Wed, 19 Oct 2022 12:02:21 -0700 Subject: Value: Add `@intCast` in `writeToPackedMemory` for 32-bit targets --- src/value.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/value.zig b/src/value.zig index 7468a69fda..2ecfaefa56 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1357,9 +1357,9 @@ pub const Value = extern union { else => unreachable, }, .Vector => { - const len = ty.arrayLen(); const elem_ty = ty.childType(); const elem_bit_size = @intCast(u16, elem_ty.bitSize(target)); + const len = @intCast(usize, ty.arrayLen()); var bits: u16 = 0; var elem_i: usize = 0; -- cgit v1.2.3 From c616141241047d6d6c811d43f644eb1b7d2b26ce Mon Sep 17 00:00:00 2001 From: Evan Haas Date: Thu, 27 Oct 2022 23:53:08 -0700 Subject: translate-c: Better support for division in macros Perform C-style arithmetic conversions on operands to division operator in macros Closes #13162 --- lib/std/zig/c_translation.zig | 126 +++++++++++++++++++++++++++++++++++ src/translate_c.zig | 2 +- src/translate_c/ast.zig | 24 +++++++ test/behavior/translate_c_macros.h | 3 + test/behavior/translate_c_macros.zig | 34 ++++++++++ 5 files changed, 188 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig index 6bc664f04c..664cb09ae4 100644 --- a/lib/std/zig/c_translation.zig +++ b/lib/std/zig/c_translation.zig @@ -40,6 +40,17 @@ pub fn cast(comptime DestType: type, target: anytype) DestType { .Fn => { return castInt(DestType, @ptrToInt(&target)); }, + .Bool => { + return @boolToInt(target); + }, + else => {}, + } + }, + .Float => { + switch (@typeInfo(SourceType)) { + .Int => return @intToFloat(DestType, target), + .Float => return @floatCast(DestType, target), + .Bool => return @intToFloat(DestType, @boolToInt(target)), else => {}, } }, @@ -446,6 +457,121 @@ pub const Macros = struct { } }; +/// Integer promotion described in C11 6.3.1.1.2 +fn PromotedIntType(comptime T: type) type { + return switch (T) { + bool, u8, i8, c_short => c_int, + c_ushort => if (@sizeOf(c_ushort) == @sizeOf(c_int)) c_uint else c_int, + c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong => T, + else => if (T == comptime_int) { + @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a fixed-size number type is required"); + } else if (@typeInfo(T) == .Int) { + @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a C ABI type is required"); + } else { + @compileError("Attempted to promote invalid type `" ++ @typeName(T) ++ "`"); + }, + }; +} + +/// C11 6.3.1.1.1 +fn integerRank(comptime T: type) u8 { + return switch (T) { + bool => 0, + u8, i8 => 1, + c_short, c_ushort => 2, + c_int, c_uint => 3, + c_long, c_ulong => 4, + 
c_longlong, c_ulonglong => 5, + else => @compileError("integer rank not supported for `" ++ @typeName(T) ++ "`"), + }; +} + +fn ToUnsigned(comptime T: type) type { + return switch (T) { + c_int => c_uint, + c_long => c_ulong, + c_longlong => c_ulonglong, + else => @compileError("Cannot convert `" ++ @typeName(T) ++ "` to unsigned"), + }; +} + +/// "Usual arithmetic conversions" from C11 standard 6.3.1.8 +fn ArithmeticConversion(comptime A: type, comptime B: type) type { + if (A == c_longdouble or B == c_longdouble) return c_longdouble; + if (A == f80 or B == f80) return f80; + if (A == f64 or B == f64) return f64; + if (A == f32 or B == f32) return f32; + + const A_Promoted = PromotedIntType(A); + const B_Promoted = PromotedIntType(B); + comptime { + std.debug.assert(integerRank(A_Promoted) >= integerRank(c_int)); + std.debug.assert(integerRank(B_Promoted) >= integerRank(c_int)); + } + + if (A_Promoted == B_Promoted) return A_Promoted; + + const a_signed = @typeInfo(A_Promoted).Int.signedness == .signed; + const b_signed = @typeInfo(B_Promoted).Int.signedness == .signed; + + if (a_signed == b_signed) { + return if (integerRank(A_Promoted) > integerRank(B_Promoted)) A_Promoted else B_Promoted; + } + + const SignedType = if (a_signed) A_Promoted else B_Promoted; + const UnsignedType = if (!a_signed) A_Promoted else B_Promoted; + + if (integerRank(UnsignedType) >= integerRank(SignedType)) return UnsignedType; + + if (std.math.maxInt(SignedType) >= std.math.maxInt(UnsignedType)) return SignedType; + + return ToUnsigned(SignedType); +} + +test "ArithmeticConversion" { + // Promotions not necessarily the same for other platforms + if (builtin.target.cpu.arch != .x86_64 or builtin.target.os.tag != .linux) return error.SkipZigTest; + + const Test = struct { + /// Order of operands should not matter for arithmetic conversions + fn checkPromotion(comptime A: type, comptime B: type, comptime Expected: type) !void { + try std.testing.expect(ArithmeticConversion(A, B) == Expected); + try std.testing.expect(ArithmeticConversion(B, A) == Expected); + } + }; + + try Test.checkPromotion(c_longdouble, c_int, c_longdouble); + try Test.checkPromotion(c_int, f64, f64); + try Test.checkPromotion(f32, bool, f32); + + try Test.checkPromotion(bool, c_short, c_int); + try Test.checkPromotion(c_int, c_int, c_int); + try Test.checkPromotion(c_short, c_int, c_int); + + try Test.checkPromotion(c_int, c_long, c_long); + + try Test.checkPromotion(c_ulonglong, c_uint, c_ulonglong); + + try Test.checkPromotion(c_uint, c_int, c_uint); + + try Test.checkPromotion(c_uint, c_long, c_long); + + try Test.checkPromotion(c_ulong, c_longlong, c_ulonglong); +} + +pub const MacroArithmetic = struct { + pub fn div(a: anytype, b: anytype) ArithmeticConversion(@TypeOf(a), @TypeOf(b)) { + const ResType = ArithmeticConversion(@TypeOf(a), @TypeOf(b)); + const a_casted = cast(ResType, a); + const b_casted = cast(ResType, b); + switch (@typeInfo(ResType)) { + .Float => return a_casted / b_casted, + .Int => return @divTrunc(a_casted, b_casted), + else => unreachable, + } + } +}; + test "Macro suffix functions" { try testing.expect(@TypeOf(Macros.F_SUFFIX(1)) == f32); diff --git a/src/translate_c.zig b/src/translate_c.zig index 7cc843e17c..693f274e8e 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -6232,7 +6232,7 @@ fn parseCMulExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { .Slash => { const lhs = try macroBoolToInt(c, node); const rhs = try macroBoolToInt(c, try parseCCastExpr(c, m, scope)); - node = try 
Tag.div.create(c.arena, .{ .lhs = lhs, .rhs = rhs }); + node = try Tag.macro_arithmetic.create(c.arena, .{ .op = .div, .lhs = lhs, .rhs = rhs }); }, .Percent => { const lhs = try macroBoolToInt(c, node); diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index 4dcdbc4250..4a64c13ce7 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -159,6 +159,9 @@ pub const Node = extern union { /// @shuffle(type, a, b, mask) shuffle, + /// @import("std").zig.c_translation.MacroArithmetic.<op>(lhs, rhs) + macro_arithmetic, + asm_simple, negate, @@ -370,6 +373,7 @@ pub const Node = extern union { .field_access => Payload.FieldAccess, .string_slice => Payload.StringSlice, .shuffle => Payload.Shuffle, + .macro_arithmetic => Payload.MacroArithmetic, }; } @@ -713,6 +717,19 @@ pub const Payload = struct { mask_vector: Node, }, }; + + pub const MacroArithmetic = struct { + base: Payload, + data: struct { + op: Operator, + lhs: Node, + rhs: Node, + }, + + pub const Operator = enum { + div, + }; + }; }; /// Converts the nodes into a Zig Ast. @@ -1408,6 +1425,12 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { payload.mask_vector, }); }, + .macro_arithmetic => { + const payload = node.castTag(.macro_arithmetic).?.data; + const op = @tagName(payload.op); + const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "MacroArithmetic", op }); + return renderCall(c, import_node, &.{ payload.lhs, payload.rhs }); + }, .alignof => { const payload = node.castTag(.alignof).?.data; return renderBuiltinCall(c, "@alignOf", &.{payload}); @@ -2349,6 +2372,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex { .shuffle, .static_local_var, .mut_str, + .macro_arithmetic, => { // no grouping needed return renderNode(c, node); diff --git a/test/behavior/translate_c_macros.h b/test/behavior/translate_c_macros.h index 5d4cf3473d..fc9aaaaf52 100644 --- a/test/behavior/translate_c_macros.h +++ b/test/behavior/translate_c_macros.h @@ -53,3 +53,6 @@ typedef _Bool uintptr_t; #define LARGE_INT 18446744073709550592 #define EMBEDDED_TAB "hello " + +#define DIVIDE_CONSTANT(version) (version / 1000) +#define DIVIDE_ARGS(A, B) (A / B) diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig index deda45df91..29cd949508 100644 --- a/test/behavior/translate_c_macros.zig +++ b/test/behavior/translate_c_macros.zig @@ -147,3 +147,37 @@ test "string and char literals that are not UTF-8 encoded. Issue #12784" { try expectEqual(@as(u8, '\xA9'), latin1.UNPRINTABLE_CHAR); try expectEqualStrings("\xA9\xA9\xA9", latin1.UNPRINTABLE_STRING); } + +test "Macro that uses division operator. 
Issue #13162" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + try expectEqual(@as(c_int, 42), h.DIVIDE_CONSTANT(@as(c_int, 42_000))); + try expectEqual(@as(c_uint, 42), h.DIVIDE_CONSTANT(@as(c_uint, 42_000))); + + try expectEqual( + @as(f64, 42.0), + h.DIVIDE_ARGS( + @as(f64, 42.0), + true, + ), + ); + try expectEqual( + @as(c_int, 21), + h.DIVIDE_ARGS( + @as(i8, 42), + @as(i8, 2), + ), + ); + + try expectEqual( + @as(c_int, 21), + h.DIVIDE_ARGS( + @as(c_ushort, 42), + @as(c_ushort, 2), + ), + ); +} -- cgit v1.2.3 From 84e0c148b1d276d0dd60488c095dfb395372a216 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 28 Oct 2022 15:30:04 -0700 Subject: CLI: report error when `-fstage1` requested but not available --- src/Compilation.zig | 1 + 1 file changed, 1 insertion(+) (limited to 'src') diff --git a/src/Compilation.zig b/src/Compilation.zig index 5c3db25555..be9e82cd87 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1109,6 +1109,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { const root_name = try arena.dupeZ(u8, options.root_name); const use_stage1 = options.use_stage1 orelse false; + if (use_stage1 and !build_options.have_stage1) return error.ZigCompilerBuiltWithoutStage1; // Make a decision on whether to use LLVM or our own backend. const use_llvm = build_options.have_llvm and blk: { -- cgit v1.2.3
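As a usage note for the translate-c change above: a C macro like `DIVIDE_ARGS(A, B)` now lowers to a call through `std.zig.c_translation.MacroArithmetic.div`, whose result type follows the C11 usual arithmetic conversions. A minimal sketch of the observable behavior (illustrative only; it assumes a target where `c_ushort` is narrower than `c_int`, as on the x86_64-linux target that the new `ArithmeticConversion` test pins itself to):

    const std = @import("std");

    test "MacroArithmetic.div applies C arithmetic conversions" {
        const div = std.zig.c_translation.MacroArithmetic.div;
        // Both c_ushort operands promote to c_int (C11 6.3.1.1), so the
        // quotient is a c_int computed with @divTrunc semantics.
        const q = div(@as(c_ushort, 42), @as(c_ushort, 2));
        try std.testing.expect(@TypeOf(q) == c_int);
        try std.testing.expectEqual(@as(c_int, 21), q);
    }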