-rw-r--r--  doc/langref.html.in | 30
-rw-r--r--  lib/std/array_list.zig | 12
-rw-r--r--  lib/std/child_process.zig | 8
-rw-r--r--  lib/std/coff.zig | 66
-rw-r--r--  lib/std/debug/leb128.zig | 47
-rw-r--r--  lib/std/fmt.zig | 23
-rw-r--r--  lib/std/fmt/parse_float.zig | 2
-rw-r--r--  lib/std/fs.zig | 12
-rw-r--r--  lib/std/fs/file.zig | 40
-rw-r--r--  lib/std/fs/test.zig | 10
-rw-r--r--  lib/std/hash/auto_hash.zig | 2
-rw-r--r--  lib/std/heap.zig | 6
-rw-r--r--  lib/std/io/reader.zig | 10
-rw-r--r--  lib/std/io/serialization.zig | 6
-rw-r--r--  lib/std/io/writer.zig | 10
-rw-r--r--  lib/std/log.zig | 4
-rw-r--r--  lib/std/math.zig | 63
-rw-r--r--  lib/std/math/big.zig | 11
-rw-r--r--  lib/std/math/big/int.zig | 87
-rw-r--r--  lib/std/math/big/int_test.zig | 6
-rw-r--r--  lib/std/math/big/rational.zig | 16
-rw-r--r--  lib/std/math/cos.zig | 2
-rw-r--r--  lib/std/math/pow.zig | 4
-rw-r--r--  lib/std/math/sin.zig | 2
-rw-r--r--  lib/std/math/sqrt.zig | 6
-rw-r--r--  lib/std/math/tan.zig | 2
-rw-r--r--  lib/std/mem.zig | 42
-rw-r--r--  lib/std/mem/Allocator.zig | 8
-rw-r--r--  lib/std/os.zig | 2
-rw-r--r--  lib/std/os/bits/linux.zig | 2
-rw-r--r--  lib/std/os/linux.zig | 8
-rw-r--r--  lib/std/os/windows/ws2_32.zig | 2
-rw-r--r--  lib/std/pdb.zig | 2
-rw-r--r--  lib/std/rand.zig | 57
-rw-r--r--  lib/std/special/build_runner.zig | 2
-rw-r--r--  lib/std/special/c.zig | 5
-rw-r--r--  lib/std/special/compiler_rt/addXf3.zig | 18
-rw-r--r--  lib/std/special/compiler_rt/aulldiv.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/aullrem.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/compareXf2.zig | 7
-rw-r--r--  lib/std/special/compiler_rt/divdf3.zig | 9
-rw-r--r--  lib/std/special/compiler_rt/divsf3.zig | 7
-rw-r--r--  lib/std/special/compiler_rt/divtf3.zig | 5
-rw-r--r--  lib/std/special/compiler_rt/divti3.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/fixint.zig | 9
-rw-r--r--  lib/std/special/compiler_rt/fixuint.zig | 6
-rw-r--r--  lib/std/special/compiler_rt/floatXisf.zig | 9
-rw-r--r--  lib/std/special/compiler_rt/floatsiXf.zig | 7
-rw-r--r--  lib/std/special/compiler_rt/floatundisf.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/floatunditf.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/floatunsitf.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/int.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/modti3.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/mulXf3.zig | 10
-rw-r--r--  lib/std/special/compiler_rt/mulodi4.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/muloti4.zig | 6
-rw-r--r--  lib/std/special/compiler_rt/negXf2.zig | 3
-rw-r--r--  lib/std/special/compiler_rt/shift.zig | 25
-rw-r--r--  lib/std/special/compiler_rt/truncXfYf2.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/udivmod.zig | 70
-rw-r--r--  lib/std/start.zig | 4
-rw-r--r--  lib/std/target.zig | 58
-rw-r--r--  lib/std/thread.zig | 6
-rw-r--r--  lib/std/zig.zig | 2
-rw-r--r--  src-self-hosted/Module.zig | 7
-rw-r--r--  src-self-hosted/codegen.zig | 278
-rw-r--r--  src-self-hosted/link.zig | 22
-rw-r--r--  src-self-hosted/link/Coff.zig | 792
-rw-r--r--  src-self-hosted/link/Elf.zig | 8
-rw-r--r--  src-self-hosted/link/MachO.zig | 25
-rw-r--r--  src-self-hosted/link/msdos-stub.bin | bin 0 -> 128 bytes
-rw-r--r--  src-self-hosted/main.zig | 23
-rw-r--r--  src-self-hosted/stage2.zig | 1
-rw-r--r--  src/analyze.cpp | 2
-rw-r--r--  src/ir.cpp | 164
-rw-r--r--  test/compile_errors.zig | 25
-rw-r--r--  test/stage1/behavior/align.zig | 2
-rw-r--r--  test/stage1/behavior/array.zig | 10
-rw-r--r--  test/stage1/behavior/async_fn.zig | 6
-rw-r--r--  test/stage1/behavior/bit_shifting.zig | 12
-rw-r--r--  test/stage1/behavior/bugs/5487.zig | 4
-rw-r--r--  test/stage1/behavior/error.zig | 4
-rw-r--r--  test/stage1/behavior/misc.zig | 10
-rw-r--r--  test/stage1/behavior/reflection.zig | 22
84 files changed, 1591 insertions, 734 deletions
diff --git a/doc/langref.html.in b/doc/langref.html.in
index b9b8f71c7a..10bc81e6df 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -2156,7 +2156,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
- assert((*u32).Child == u32);
+ assert(@typeInfo(*u32).Pointer.child == u32);
}
{#code_end#}
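
A minimal sketch of the reflection style this hunk migrates to: pointer
metadata now comes from the `std.builtin.TypeInfo` union returned by
`@typeInfo` instead of the removed `Child`/`alignment` type fields.

    const assert = @import("std").debug.assert;

    test "pointer reflection via @typeInfo" {
        // The `.Pointer` payload carries what used to be exposed as
        // `(*T).Child` and `(*T).alignment`.
        assert(@typeInfo(*u32).Pointer.child == u32);
        assert(@typeInfo(*u32).Pointer.alignment == @alignOf(u32));
    }
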
{#header_open|Alignment#}
@@ -2184,7 +2184,7 @@ test "variable alignment" {
assert(@TypeOf(&x) == *i32);
assert(*i32 == *align(align_of_i32) i32);
if (std.Target.current.cpu.arch == .x86_64) {
- assert((*i32).alignment == 4);
+ assert(@typeInfo(*i32).Pointer.alignment == 4);
}
}
{#code_end#}
@@ -2202,7 +2202,7 @@ const assert = @import("std").debug.assert;
var foo: u8 align(4) = 100;
test "global variable alignment" {
- assert(@TypeOf(&foo).alignment == 4);
+ assert(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
assert(@TypeOf(&foo) == *align(4) u8);
const as_pointer_to_array: *[1]u8 = &foo;
const as_slice: []u8 = as_pointer_to_array;
@@ -4310,8 +4310,8 @@ test "fn type inference" {
const assert = @import("std").debug.assert;
test "fn reflection" {
- assert(@TypeOf(assert).ReturnType == void);
- assert(@TypeOf(assert).is_var_args == false);
+ assert(@typeInfo(@TypeOf(assert)).Fn.return_type.? == void);
+ assert(@typeInfo(@TypeOf(assert)).Fn.is_var_args == false);
}
{#code_end#}
{#header_close#}
@@ -4611,10 +4611,10 @@ test "error union" {
foo = error.SomeError;
// Use compile-time reflection to access the payload type of an error union:
- comptime assert(@TypeOf(foo).Payload == i32);
+ comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.payload == i32);
// Use compile-time reflection to access the error set type of an error union:
- comptime assert(@TypeOf(foo).ErrorSet == anyerror);
+ comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.error_set == anyerror);
}
{#code_end#}
{#header_open|Merging Error Sets#}
@@ -4991,7 +4991,7 @@ test "optional type" {
foo = 1234;
// Use compile-time reflection to access the child type of the optional:
- comptime assert(@TypeOf(foo).Child == i32);
+ comptime assert(@typeInfo(@TypeOf(foo)).Optional.child == i32);
}
{#code_end#}
{#header_close#}
@@ -6889,7 +6889,7 @@ fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#header_close#}
@@ -6899,7 +6899,7 @@ fn func(y: *i32) void {
This builtin function atomically modifies memory and then returns the previous value.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>
@@ -6925,7 +6925,7 @@ fn func(y: *i32) void {
This builtin function atomically stores a value.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#header_close#}
@@ -7208,10 +7208,10 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
more efficiently in machine instructions.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
- <p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
+ <p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
@@ -7237,10 +7237,10 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
- <p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
+ <p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgStrong#}
{#header_close#}
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index a7432a30ae..f298d14631 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -46,7 +46,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self.init(allocator);
- try self.ensureCapacity(num);
+
+ const new_memory = try self.allocator.allocAdvanced(T, alignment, num, .at_least);
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
+
return self;
}
@@ -366,7 +370,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self{};
- try self.ensureCapacity(allocator, num);
+
+ const new_memory = try allocator.allocAdvanced(T, alignment, num, .at_least);
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
+
return self;
}
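
A usage sketch of the reworked `initCapacity`: capacity is now grabbed in one
`allocAdvanced` call with the `.at_least` strategy, so the resulting capacity
may exceed the request while the list itself stays empty.

    const std = @import("std");

    test "initCapacity reserves without growing the list" {
        var list = try std.ArrayList(u8).initCapacity(std.testing.allocator, 64);
        defer list.deinit();
        // `.at_least` may round the allocation up, never down.
        std.debug.assert(list.capacity >= 64);
        std.debug.assert(list.items.len == 0);
    }
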
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index c64fefbc63..9219b05088 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -275,9 +275,7 @@ pub const ChildProcess = struct {
}
fn handleWaitResult(self: *ChildProcess, status: u32) void {
- // TODO https://github.com/ziglang/zig/issues/3190
- var term = self.cleanupAfterWait(status);
- self.term = term;
+ self.term = self.cleanupAfterWait(status);
}
fn cleanupStreams(self: *ChildProcess) void {
@@ -487,8 +485,8 @@ pub const ChildProcess = struct {
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore)
- windows.OpenFile(&[_]u16{ 'N', 'U', 'L' }, .{
- .dir = std.fs.cwd().fd,
+ // "\Device\Null" or "\??\NUL"
+ windows.OpenFile(&[_]u16{ '\\', 'D', 'e', 'v', 'i', 'c', 'e', '\\', 'N', 'u', 'l', 'l' }, .{
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.share_access = windows.FILE_SHARE_READ,
.creation = windows.OPEN_EXISTING,
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index cd567b3a6e..ea3a232187 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -18,11 +18,77 @@ const IMAGE_FILE_MACHINE_I386 = 0x014c;
const IMAGE_FILE_MACHINE_IA64 = 0x0200;
const IMAGE_FILE_MACHINE_AMD64 = 0x8664;
+pub const MachineType = enum(u16) {
+ Unknown = 0x0,
+ /// Matsushita AM33
+ AM33 = 0x1d3,
+ /// x64
+ X64 = 0x8664,
+ /// ARM little endian
+ ARM = 0x1c0,
+ /// ARM64 little endian
+ ARM64 = 0xaa64,
+ /// ARM Thumb-2 little endian
+ ARMNT = 0x1c4,
+ /// EFI byte code
+ EBC = 0xebc,
+ /// Intel 386 or later processors and compatible processors
+ I386 = 0x14c,
+ /// Intel Itanium processor family
+ IA64 = 0x200,
+ /// Mitsubishi M32R little endian
+ M32R = 0x9041,
+ /// MIPS16
+ MIPS16 = 0x266,
+ /// MIPS with FPU
+ MIPSFPU = 0x366,
+ /// MIPS16 with FPU
+ MIPSFPU16 = 0x466,
+ /// Power PC little endian
+ POWERPC = 0x1f0,
+ /// Power PC with floating point support
+ POWERPCFP = 0x1f1,
+ /// MIPS little endian
+ R4000 = 0x166,
+ /// RISC-V 32-bit address space
+ RISCV32 = 0x5032,
+ /// RISC-V 64-bit address space
+ RISCV64 = 0x5064,
+ /// RISC-V 128-bit address space
+ RISCV128 = 0x5128,
+ /// Hitachi SH3
+ SH3 = 0x1a2,
+ /// Hitachi SH3 DSP
+ SH3DSP = 0x1a3,
+ /// Hitachi SH4
+ SH4 = 0x1a6,
+ /// Hitachi SH5
+ SH5 = 0x1a8,
+ /// Thumb
+ Thumb = 0x1c2,
+ /// MIPS little-endian WCE v2
+ WCEMIPSV2 = 0x169,
+};
+
// OptionalHeader.magic values
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
+// Image Characteristics
+pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1;
+pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200;
+pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2;
+pub const IMAGE_FILE_32BIT_MACHINE = 0x100;
+pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20;
+
+// Section flags
+pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
+pub const IMAGE_SCN_MEM_READ = 0x40000000;
+pub const IMAGE_SCN_CNT_CODE = 0x20;
+pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000;
+pub const IMAGE_SCN_MEM_WRITE = 0x80000000;
+
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;
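
For illustration, a hedged sketch of consuming the new `MachineType` enum when
inspecting a COFF header; `describeMachine` and its raw `u16` input are
hypothetical helpers, not part of this diff.

    const std = @import("std");

    // Hypothetical helper: map the raw COFF `Machine` field onto the enum.
    fn describeMachine(machine: u16) []const u8 {
        return switch (@intToEnum(std.coff.MachineType, machine)) {
            .X64 => "x86-64",
            .I386 => "x86",
            .ARM64 => "aarch64",
            else => "other",
        };
    }

    test "machine type lookup" {
        std.testing.expect(std.mem.eql(u8, describeMachine(0x8664), "x86-64"));
    }
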
diff --git a/lib/std/debug/leb128.zig b/lib/std/debug/leb128.zig
index eca777c1cf..2b96d39131 100644
--- a/lib/std/debug/leb128.zig
+++ b/lib/std/debug/leb128.zig
@@ -9,10 +9,10 @@ const testing = std.testing;
/// Read a single unsigned LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readULEB128(comptime T: type, reader: anytype) !T {
- const U = if (T.bit_count < 8) u8 else T;
+ const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
const ShiftT = std.math.Log2Int(U);
- const max_group = (U.bit_count + 6) / 7;
+ const max_group = (@typeInfo(U).Int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftT, 0);
@@ -40,7 +40,7 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
- const U = if (T.bit_count < 8) u8 else T;
+ const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, uint_value);
while (true) {
@@ -68,7 +68,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T {
/// returning the number of bytes written.
pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
const T = @TypeOf(uint_value);
- const max_group = (T.bit_count + 6) / 7;
+ const max_group = (@typeInfo(T).Int.bits + 6) / 7;
var buf = std.io.fixedBufferStream(ptr);
try writeULEB128(buf.writer(), uint_value);
return buf.pos;
@@ -77,11 +77,11 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readILEB128(comptime T: type, reader: anytype) !T {
- const S = if (T.bit_count < 8) i8 else T;
- const U = std.meta.Int(false, S.bit_count);
+ const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
+ const U = std.meta.Int(false, @typeInfo(S).Int.bits);
const ShiftU = std.math.Log2Int(U);
- const max_group = (U.bit_count + 6) / 7;
+ const max_group = (@typeInfo(U).Int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftU, 0);
@@ -97,7 +97,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
if (@bitCast(S, temp) >= 0) return error.Overflow;
// and all the overflowed bits are 1
- const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift));
+ const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
@@ -127,8 +127,8 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
- const S = if (T.bit_count < 8) i8 else T;
- const U = std.meta.Int(false, S.bit_count);
+ const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
+ const U = std.meta.Int(false, @typeInfo(S).Int.bits);
var value = @intCast(S, int_value);
@@ -173,7 +173,7 @@ pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize {
/// different value without shifting all the following code.
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(false, l * 7)) void {
const T = @TypeOf(int);
- const U = if (T.bit_count < 8) u8 else T;
+ const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, int);
comptime var i = 0;
@@ -346,28 +346,29 @@ test "deserialize unsigned LEB128" {
fn test_write_leb128(value: anytype) !void {
const T = @TypeOf(value);
+ const t_signed = @typeInfo(T).Int.is_signed;
- const writeStream = if (T.is_signed) writeILEB128 else writeULEB128;
- const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem;
- const readStream = if (T.is_signed) readILEB128 else readULEB128;
- const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem;
+ const writeStream = if (t_signed) writeILEB128 else writeULEB128;
+ const writeMem = if (t_signed) writeILEB128Mem else writeULEB128Mem;
+ const readStream = if (t_signed) readILEB128 else readULEB128;
+ const readMem = if (t_signed) readILEB128Mem else readULEB128Mem;
// decode to a larger bit size too, to ensure sign extension
// is working as expected
- const larger_type_bits = ((T.bit_count + 8) / 8) * 8;
- const B = std.meta.Int(T.is_signed, larger_type_bits);
+ const larger_type_bits = ((@typeInfo(T).Int.bits + 8) / 8) * 8;
+ const B = std.meta.Int(t_signed, larger_type_bits);
const bytes_needed = bn: {
- const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8);
- if (T.bit_count <= 7) break :bn @as(u16, 1);
+ const S = std.meta.Int(t_signed, @sizeOf(T) * 8);
+ if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);
const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
- const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed);
+ const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);
};
- const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7;
+ const max_groups = if (@typeInfo(T).Int.bits == 0) 1 else (@typeInfo(T).Int.bits + 6) / 7;
var buf: [max_groups]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
@@ -414,7 +415,7 @@ test "serialize unsigned LEB128" {
const T = std.meta.Int(false, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(false, T.bit_count + 1), min);
+ var i = @as(std.meta.Int(false, @typeInfo(T).Int.bits + 1), min);
while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
@@ -432,7 +433,7 @@ test "serialize signed LEB128" {
const T = std.meta.Int(true, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(true, T.bit_count + 1), min);
+ var i = @as(std.meta.Int(true, @typeInfo(T).Int.bits + 1), min);
while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
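
A round-trip sketch of the reader/writer pairs these hunks migrate, sized with
the same `(bits + 6) / 7` bound used above (assuming the module is exposed as
`std.debug.leb`, matching its lib/std/debug/leb128.zig location).

    const std = @import("std");
    const leb = std.debug.leb;

    test "ULEB128 round trip" {
        var buf: [(32 + 6) / 7]u8 = undefined; // worst case for a u32
        var fbs = std.io.fixedBufferStream(&buf);
        try leb.writeULEB128(fbs.writer(), @as(u32, 624485));
        fbs.pos = 0;
        std.testing.expect((try leb.readULEB128(u32, fbs.reader())) == 624485);
    }
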
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 3067a55759..a652bd8c21 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -82,6 +82,8 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// This allows user types to be formatted in a logical manner instead of dumping all fields of the type.
///
/// A user type may be a `struct`, `vector`, `union` or `enum` type.
+///
+/// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`.
pub fn format(
writer: anytype,
comptime fmt: []const u8,
@@ -91,7 +93,7 @@ pub fn format(
if (@typeInfo(@TypeOf(args)) != .Struct) {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
- if (args.len > ArgSetType.bit_count) {
+ if (args.len > @typeInfo(ArgSetType).Int.bits) {
@compileError("32 arguments max are supported per format call");
}
@@ -325,7 +327,7 @@ pub fn formatType(
max_depth: usize,
) @TypeOf(writer).Error!void {
if (comptime std.mem.eql(u8, fmt, "*")) {
- try writer.writeAll(@typeName(@TypeOf(value).Child));
+ try writer.writeAll(@typeName(@typeInfo(@TypeOf(value)).Pointer.child));
try writer.writeAll("@");
try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer);
return;
@@ -430,12 +432,12 @@ pub fn formatType(
if (info.child == u8) {
return formatText(value, fmt, options, writer);
}
- return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Enum, .Union, .Struct => {
return formatType(value.*, fmt, options, writer, max_depth);
},
- else => return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
+ else => return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }),
},
.Many, .C => {
if (ptr_info.sentinel) |sentinel| {
@@ -446,7 +448,7 @@ pub fn formatType(
return formatText(mem.span(value), fmt, options, writer);
}
}
- return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Slice => {
if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) {
@@ -536,7 +538,7 @@ pub fn formatIntValue(
radix = 10;
uppercase = false;
} else if (comptime std.mem.eql(u8, fmt, "c")) {
- if (@TypeOf(int_value).bit_count <= 8) {
+ if (@typeInfo(@TypeOf(int_value)).Int.bits <= 8) {
return formatAsciiChar(@as(u8, int_value), options, writer);
} else {
@compileError("Cannot print integer that is larger than 8 bits as a ascii");
@@ -945,7 +947,7 @@ pub fn formatInt(
} else
value;
- if (@TypeOf(int_value).is_signed) {
+ if (@typeInfo(@TypeOf(int_value)).Int.is_signed) {
return formatIntSigned(int_value, base, uppercase, options, writer);
} else {
return formatIntUnsigned(int_value, base, uppercase, options, writer);
@@ -987,9 +989,10 @@ fn formatIntUnsigned(
writer: anytype,
) !void {
assert(base >= 2);
- var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
- const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count);
- const MinInt = std.meta.Int(@TypeOf(value).is_signed, min_int_bits);
+ const value_info = @typeInfo(@TypeOf(value)).Int;
+ var buf: [math.max(value_info.bits, 1)]u8 = undefined;
+ const min_int_bits = comptime math.max(value_info.bits, @typeInfo(@TypeOf(base)).Int.bits);
+ const MinInt = std.meta.Int(value_info.is_signed, min_int_bits);
var a: MinInt = value;
var index: usize = buf.len;
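
A tiny sketch of the brace-escaping rule documented above:

    const std = @import("std");

    test "escaped braces in format strings" {
        var buf: [16]u8 = undefined;
        // `{{` and `}}` emit literal braces; `{}` is still a placeholder.
        const s = try std.fmt.bufPrint(&buf, "{{x = {}}}", .{42});
        std.testing.expect(std.mem.eql(u8, s, "{x = 42}"));
    }
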
diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig
index 7e72e7ba51..de17c60db6 100644
--- a/lib/std/fmt/parse_float.zig
+++ b/lib/std/fmt/parse_float.zig
@@ -374,7 +374,7 @@ test "fmt.parseFloat" {
const epsilon = 1e-7;
inline for ([_]type{ f16, f32, f64, f128 }) |T| {
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
testing.expectError(error.InvalidCharacter, parseFloat(T, ""));
testing.expectError(error.InvalidCharacter, parseFloat(T, " 1"));
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 21a00eeb1d..a217fb3e9b 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1437,26 +1437,32 @@ pub const Dir = struct {
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
- return self.readFileAllocOptions(allocator, file_path, max_bytes, @alignOf(u8), null);
+ return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
}
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
+ /// If `size_hint` is specified, the initial buffer size is calculated using
+ /// that value; otherwise the effective file size is used.
/// Allows specifying alignment and a sentinel value.
pub fn readFileAllocOptions(
self: Dir,
allocator: *mem.Allocator,
file_path: []const u8,
max_bytes: usize,
+ size_hint: ?usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
var file = try self.openFile(file_path, .{});
defer file.close();
- const stat_size = try file.getEndPos();
+ // If the file size doesn't fit in a usize, it's certainly greater than
+ // `max_bytes`.
+ const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) catch
+ return error.FileTooBig;
- return file.readAllAllocOptions(allocator, stat_size, max_bytes, alignment, optional_sentinel);
+ return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);
}
pub const DeleteTreeError = error{
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 6fb2385a85..ef1b501ec3 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -363,31 +363,49 @@ pub const File = struct {
try os.futimens(self.handle, &times);
}
+ /// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
- pub fn readAllAlloc(self: File, allocator: *mem.Allocator, stat_size: u64, max_bytes: usize) ![]u8 {
- return self.readAllAllocOptions(allocator, stat_size, max_bytes, @alignOf(u8), null);
+ pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
+ return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
}
+ /// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
+ /// If `size_hint` is specified, the initial buffer size is calculated using
+ /// that value; otherwise an arbitrary value is used.
/// Allows specifying alignment and a sentinel value.
- pub fn readAllAllocOptions(
+ pub fn readToEndAllocOptions(
self: File,
allocator: *mem.Allocator,
- stat_size: u64,
max_bytes: usize,
+ size_hint: ?usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
- const size = math.cast(usize, stat_size) catch math.maxInt(usize);
- if (size > max_bytes) return error.FileTooBig;
-
- const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
- errdefer allocator.free(buf);
+ // If no size hint is provided, fall back to the size=0 code path.
+ const size = size_hint orelse 0;
+
+ // The file size returned by stat is used as a hint to set the buffer
+ // size. If the reported size is zero, as happens on Linux for files
+ // in /proc, a small buffer is allocated instead.
+ const initial_cap = (if (size > 0) size else 1024) + @boolToInt(optional_sentinel != null);
+ var array_list = try std.ArrayListAligned(u8, alignment).initCapacity(allocator, initial_cap);
+ defer array_list.deinit();
+
+ self.reader().readAllArrayList(&array_list, max_bytes) catch |err| switch (err) {
+ error.StreamTooLong => return error.FileTooBig,
+ else => |e| return e,
+ };
- try self.reader().readNoEof(buf);
- return buf;
+ if (optional_sentinel) |sentinel| {
+ try array_list.append(sentinel);
+ const buf = array_list.toOwnedSlice();
+ return buf[0 .. buf.len - 1 :sentinel];
+ } else {
+ return array_list.toOwnedSlice();
+ }
}
pub const ReadError = os.ReadError;
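
A usage sketch of the renamed API: callers no longer thread a stat-derived
size through, and `max_bytes` is a hard cap rather than a buffer size. The
file name and 1 MiB limit here are placeholders.

    const std = @import("std");

    fn readConfig(allocator: *std.mem.Allocator) ![]u8 {
        const file = try std.fs.cwd().openFile("config.txt", .{});
        defer file.close();
        return file.readToEndAlloc(allocator, 1024 * 1024);
    }
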
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 409a53b1a7..a59bc46245 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -188,30 +188,30 @@ test "readAllAlloc" {
var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
defer file.close();
- const buf1 = try file.readAllAlloc(testing.allocator, 0, 1024);
+ const buf1 = try file.readToEndAlloc(testing.allocator, 1024);
defer testing.allocator.free(buf1);
testing.expect(buf1.len == 0);
const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
try file.writeAll(write_buf);
try file.seekTo(0);
- const file_size = try file.getEndPos();
// max_bytes > file_size
- const buf2 = try file.readAllAlloc(testing.allocator, file_size, 1024);
+ const buf2 = try file.readToEndAlloc(testing.allocator, 1024);
defer testing.allocator.free(buf2);
testing.expectEqual(write_buf.len, buf2.len);
testing.expect(std.mem.eql(u8, write_buf, buf2));
try file.seekTo(0);
// max_bytes == file_size
- const buf3 = try file.readAllAlloc(testing.allocator, file_size, write_buf.len);
+ const buf3 = try file.readToEndAlloc(testing.allocator, write_buf.len);
defer testing.allocator.free(buf3);
testing.expectEqual(write_buf.len, buf3.len);
testing.expect(std.mem.eql(u8, write_buf, buf3));
+ try file.seekTo(0);
// max_bytes < file_size
- testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, file_size, write_buf.len - 1));
+ testing.expectError(error.FileTooBig, file.readToEndAlloc(testing.allocator, write_buf.len - 1));
}
test "directory operations on files" {
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index 2905a6af13..5877c77b5d 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -113,7 +113,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
.Array => hashArray(hasher, key, strat),
.Vector => |info| {
- if (info.child.bit_count % 8 == 0) {
+ if (std.meta.bitCount(info.child) % 8 == 0) {
// If there's no unused bits in the child type, we can just hash
// this as an array of bytes.
hasher.update(mem.asBytes(&key));
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index d6977f2f9c..6db1be539c 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -915,6 +915,10 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
testing.expect(slice.len == 10);
allocator.free(slice);
+
+ const zero_bit_ptr = try allocator.create(u0);
+ zero_bit_ptr.* = 0;
+ allocator.destroy(zero_bit_ptr);
}
pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
@@ -952,7 +956,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
- const USizeShift = std.meta.Int(false, std.math.log2(usize.bit_count));
+ const USizeShift = std.meta.Int(false, std.math.log2(std.meta.bitCount(usize)));
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 2ab799046a..4090f5a476 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -198,28 +198,28 @@ pub fn Reader(
/// Reads a native-endian integer
pub fn readIntNative(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntNative(T, &bytes);
}
/// Reads a foreign-endian integer
pub fn readIntForeign(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntForeign(T, &bytes);
}
pub fn readIntLittle(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntLittle(T, &bytes);
}
pub fn readIntBig(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntBig(T, &bytes);
}
pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readInt(T, &bytes, endian);
}
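
A small sketch of the migrated integer readers; `readIntLittle(u16)` pulls
`(16 + 7) / 8 == 2` bytes and decodes them little-endian.

    const std = @import("std");

    test "readIntLittle consumes two bytes for a u16" {
        const bytes = [_]u8{ 0x34, 0x12 };
        var fbs = std.io.fixedBufferStream(&bytes);
        std.testing.expect((try fbs.reader().readIntLittle(u16)) == 0x1234);
    }
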
diff --git a/lib/std/io/serialization.zig b/lib/std/io/serialization.zig
index 4f8c149b47..925c929cee 100644
--- a/lib/std/io/serialization.zig
+++ b/lib/std/io/serialization.zig
@@ -60,7 +60,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
- const int_size = (U.bit_count + 7) / 8;
+ const int_size = (t_bit_count + 7) / 8;
if (packing == .Bit) {
const result = try self.in_stream.readBitsNoEof(U, t_bit_count);
@@ -73,7 +73,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
if (int_size == 1) {
if (t_bit_count == 8) return @bitCast(T, buffer[0]);
- const PossiblySignedByte = std.meta.Int(T.is_signed, 8);
+ const PossiblySignedByte = std.meta.Int(@typeInfo(T).Int.is_signed, 8);
return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
}
@@ -247,7 +247,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
- const int_size = (U.bit_count + 7) / 8;
+ const int_size = (t_bit_count + 7) / 8;
const u_value = @bitCast(U, value);
diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig
index 39729ef0a2..770cd5f0fa 100644
--- a/lib/std/io/writer.zig
+++ b/lib/std/io/writer.zig
@@ -53,7 +53,7 @@ pub fn Writer(
/// Write a native-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntNative(T, &bytes, value);
return self.writeAll(&bytes);
}
@@ -61,28 +61,28 @@ pub fn Writer(
/// Write a foreign-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntForeign(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntLittle(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntBig(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
}
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 50bdfdc068..7b677f698a 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -127,6 +127,10 @@ fn log(
if (@enumToInt(message_level) <= @enumToInt(level)) {
if (@hasDecl(root, "log")) {
root.log(message_level, scope, format, args);
+ } else if (std.Target.current.os.tag == .freestanding) {
+ // On freestanding one must provide a log function; we do not have
+ // any I/O configured.
+ return;
} else if (builtin.mode != .ReleaseSmall) {
const held = std.debug.getStderrMutex().acquire();
defer held.release();
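
On freestanding targets the hook must therefore come from the root source
file; a hedged sketch of such an override (the empty sink is a placeholder
for a UART or similar platform channel).

    const std = @import("std");

    // Lives in the root source file; discovered via `@hasDecl(root, "log")`
    // in the branch above.
    pub fn log(
        comptime message_level: std.log.Level,
        comptime scope: @TypeOf(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) void {
        // Placeholder sink: format `args` into a UART, semihosting channel,
        // or in-memory ring buffer here.
        _ = message_level;
        _ = scope;
        _ = format;
        _ = args;
    }
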
diff --git a/lib/std/math.zig b/lib/std/math.zig
index de9f5e349d..f05c967b2d 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -195,7 +195,7 @@ test "" {
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);
- return switch (T.bit_count) {
+ return switch (@typeInfo(T).Float.bits) {
16 => 10,
32 => 23,
64 => 52,
@@ -208,7 +208,7 @@ pub fn floatMantissaBits(comptime T: type) comptime_int {
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);
- return switch (T.bit_count) {
+ return switch (@typeInfo(T).Float.bits) {
16 => 5,
32 => 8,
64 => 11,
@@ -347,9 +347,9 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
- const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
+ const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);
- if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
+ if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt < 0) {
return a >> casted_shift_amt;
}
@@ -373,9 +373,9 @@ test "math.shl" {
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
- const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
+ const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);
- if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
+ if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt >= 0) {
return a >> casted_shift_amt;
} else {
@@ -400,11 +400,11 @@ test "math.shr" {
/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: anytype) T {
- if (T.is_signed) {
+ if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
- const ar = @mod(r, T.bit_count);
- return shr(T, x, ar) | shl(T, x, T.bit_count - ar);
+ const ar = @mod(r, @typeInfo(T).Int.bits);
+ return shr(T, x, ar) | shl(T, x, @typeInfo(T).Int.bits - ar);
}
}
@@ -419,11 +419,11 @@ test "math.rotr" {
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: anytype) T {
- if (T.is_signed) {
+ if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
- const ar = @mod(r, T.bit_count);
- return shl(T, x, ar) | shr(T, x, T.bit_count - ar);
+ const ar = @mod(r, @typeInfo(T).Int.bits);
+ return shl(T, x, ar) | shr(T, x, @typeInfo(T).Int.bits - ar);
}
}
@@ -438,7 +438,7 @@ test "math.rotl" {
pub fn Log2Int(comptime T: type) type {
// comptime ceil log2
comptime var count = 0;
- comptime var s = T.bit_count - 1;
+ comptime var s = @typeInfo(T).Int.bits - 1;
inline while (s != 0) : (s >>= 1) {
count += 1;
}
@@ -524,7 +524,7 @@ fn testOverflow() void {
pub fn absInt(x: anytype) !@TypeOf(x) {
const T = @TypeOf(x);
comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
- comptime assert(T.is_signed); // must pass a signed integer to absInt
+ comptime assert(@typeInfo(T).Int.is_signed); // must pass a signed integer to absInt
if (x == minInt(@TypeOf(x))) {
return error.Overflow;
@@ -557,7 +557,7 @@ fn testAbsFloat() void {
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
- if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+ if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}
@@ -578,7 +578,7 @@ fn testDivTrunc() void {
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
- if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+ if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}
@@ -652,7 +652,7 @@ fn testDivCeil() void {
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
- if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+ if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
@@ -757,10 +757,10 @@ test "math.absCast" {
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
-pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) {
- if (@TypeOf(x).is_signed) return negate(x);
+pub fn negateCast(x: anytype) !std.meta.Int(true, std.meta.bitCount(@TypeOf(x))) {
+ if (@typeInfo(@TypeOf(x)).Int.is_signed) return negate(x);
- const int = std.meta.Int(true, @TypeOf(x).bit_count);
+ const int = std.meta.Int(true, std.meta.bitCount(@TypeOf(x)));
if (x > -minInt(int)) return error.Overflow;
if (x == -minInt(int)) return minInt(int);
@@ -823,7 +823,7 @@ pub fn floorPowerOfTwo(comptime T: type, value: T) T {
var x = value;
comptime var i = 1;
- inline while (T.bit_count > i) : (i *= 2) {
+ inline while (@typeInfo(T).Int.bits > i) : (i *= 2) {
x |= (x >> i);
}
@@ -847,13 +847,13 @@ fn testFloorPowerOfTwo() void {
/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// Result is a type with 1 more bit than the input type.
-pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signed, T.bit_count + 1) {
+pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1) {
comptime assert(@typeInfo(T) == .Int);
- comptime assert(!T.is_signed);
+ comptime assert(!@typeInfo(T).Int.is_signed);
assert(value != 0);
- comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
+ comptime const PromotedType = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1);
comptime const shiftType = std.math.Log2Int(PromotedType);
- return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
+ return @as(PromotedType, 1) << @intCast(shiftType, @typeInfo(T).Int.bits - @clz(T, value - 1));
}
/// Returns the next power of two (if the value is not already a power of two).
@@ -861,9 +861,10 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signe
/// If the value doesn't fit, returns an error.
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int);
- comptime assert(!T.is_signed);
- comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
- comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
+ const info = @typeInfo(T).Int;
+ comptime assert(!info.is_signed);
+ comptime const PromotedType = std.meta.Int(info.is_signed, info.bits + 1);
+ comptime const overflowBit = @as(PromotedType, 1) << info.bits;
var x = ceilPowerOfTwoPromote(T, value);
if (overflowBit & x != 0) {
return error.Overflow;
@@ -911,7 +912,7 @@ fn testCeilPowerOfTwo() !void {
pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
- return @intCast(Log2Int(T), T.bit_count - 1 - @clz(T, x));
+ return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
}
pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
@@ -1008,8 +1009,8 @@ test "max value type" {
testing.expect(x == 2147483647);
}
-pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(T.is_signed, T.bit_count * 2) {
- const ResultInt = std.meta.Int(T.is_signed, T.bit_count * 2);
+pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2) {
+ const ResultInt = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2);
return @as(ResultInt, a) * @as(ResultInt, b);
}
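
A worked sketch of `mulWide` after the migration: the result type doubles the
input's bit count, so the product can never overflow.

    const std = @import("std");

    test "mulWide doubles the bit count" {
        const p = std.math.mulWide(u8, 200, 200);
        // 40000 does not fit in a u8 but always fits in the u16 result.
        std.testing.expect(@TypeOf(p) == u16);
        std.testing.expect(p == 40000);
    }
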
diff --git a/lib/std/math/big.zig b/lib/std/math/big.zig
index 6246a4fb8b..03257e35ea 100644
--- a/lib/std/math/big.zig
+++ b/lib/std/math/big.zig
@@ -9,14 +9,15 @@ const assert = std.debug.assert;
pub const Rational = @import("big/rational.zig").Rational;
pub const int = @import("big/int.zig");
pub const Limb = usize;
-pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count);
-pub const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count);
+const limb_info = @typeInfo(Limb).Int;
+pub const DoubleLimb = std.meta.IntType(false, 2 * limb_info.bits);
+pub const SignedDoubleLimb = std.meta.IntType(true, 2 * limb_info.bits);
pub const Log2Limb = std.math.Log2Int(Limb);
comptime {
- assert(std.math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
- assert(Limb.bit_count <= 64); // u128 set is unsupported
- assert(Limb.is_signed == false);
+ assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits);
+ assert(limb_info.bits <= 64); // u128 set is unsupported
+ assert(limb_info.is_signed == false);
}
test "" {
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 28da1064c9..19f6d0809e 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -6,6 +6,7 @@
const std = @import("../../std.zig");
const math = std.math;
const Limb = std.math.big.Limb;
+const limb_bits = @typeInfo(Limb).Int.bits;
const DoubleLimb = std.math.big.DoubleLimb;
const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const Log2Limb = std.math.big.Log2Limb;
@@ -28,7 +29,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
},
.ComptimeInt => {
const w_value = if (scalar < 0) -scalar else scalar;
- return @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ return @divFloor(math.log2(w_value), limb_bits) + 1;
},
else => @compileError("parameter must be a primitive integer type"),
}
@@ -54,7 +55,7 @@ pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize {
}
pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize {
- return (string_len + (Limb.bit_count / base - 1)) / (Limb.bit_count / base);
+ return (string_len + (limb_bits / base - 1)) / (limb_bits / base);
}
/// a + b * c + *carry, sets carry to the overflow bits
@@ -68,7 +69,7 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
// r2 = b * c
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
- const c2 = @truncate(Limb, bc >> Limb.bit_count);
+ const c2 = @truncate(Limb, bc >> limb_bits);
// r1 = r1 + r2
const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1));
@@ -181,7 +182,7 @@ pub const Mutable = struct {
switch (@typeInfo(T)) {
.Int => |info| {
- const UT = if (T.is_signed) std.meta.Int(false, T.bit_count - 1) else T;
+ const UT = if (info.is_signed) std.meta.Int(false, info.bits - 1) else T;
const needed_limbs = @sizeOf(UT) / @sizeOf(Limb);
assert(needed_limbs <= self.limbs.len); // value too big
@@ -190,7 +191,7 @@ pub const Mutable = struct {
var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value);
- if (info.bits <= Limb.bit_count) {
+ if (info.bits <= limb_bits) {
self.limbs[0] = @as(Limb, w_value);
self.len += 1;
} else {
@@ -200,15 +201,15 @@ pub const Mutable = struct {
self.len += 1;
// TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
- w_value >>= Limb.bit_count / 2;
- w_value >>= Limb.bit_count / 2;
+ w_value >>= limb_bits / 2;
+ w_value >>= limb_bits / 2;
}
}
},
.ComptimeInt => {
comptime var w_value = if (value < 0) -value else value;
- const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ const req_limbs = @divFloor(math.log2(w_value), limb_bits) + 1;
assert(req_limbs <= self.limbs.len); // value too big
self.len = req_limbs;
@@ -217,14 +218,14 @@ pub const Mutable = struct {
if (w_value <= maxInt(Limb)) {
self.limbs[0] = w_value;
} else {
- const mask = (1 << Limb.bit_count) - 1;
+ const mask = (1 << limb_bits) - 1;
comptime var i = 0;
inline while (w_value != 0) : (i += 1) {
self.limbs[i] = w_value & mask;
- w_value >>= Limb.bit_count / 2;
- w_value >>= Limb.bit_count / 2;
+ w_value >>= limb_bits / 2;
+ w_value >>= limb_bits / 2;
}
}
},
@@ -506,7 +507,7 @@ pub const Mutable = struct {
/// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void {
llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
- r.normalize(a.limbs.len + (shift / Limb.bit_count) + 1);
+ r.normalize(a.limbs.len + (shift / limb_bits) + 1);
r.positive = a.positive;
}
@@ -516,7 +517,7 @@ pub const Mutable = struct {
/// Asserts there is enough memory to fit the result. The upper bound Limb count is
/// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void {
- if (a.limbs.len <= shift / Limb.bit_count) {
+ if (a.limbs.len <= shift / limb_bits) {
r.len = 1;
r.positive = true;
r.limbs[0] = 0;
@@ -524,7 +525,7 @@ pub const Mutable = struct {
}
const r_len = llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
- r.len = a.limbs.len - (shift / Limb.bit_count);
+ r.len = a.limbs.len - (shift / limb_bits);
r.positive = a.positive;
}
@@ -772,7 +773,7 @@ pub const Mutable = struct {
}
if (ab_zero_limb_count != 0) {
- rem.shiftLeft(rem.toConst(), ab_zero_limb_count * Limb.bit_count);
+ rem.shiftLeft(rem.toConst(), ab_zero_limb_count * limb_bits);
}
}
@@ -803,10 +804,10 @@ pub const Mutable = struct {
};
tmp.limbs[0] = 0;
- // Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even
+ // Normalize so y > limb_bits / 2 (i.e. leading bit is set) and even
var norm_shift = @clz(Limb, y.limbs[y.len - 1]);
if (norm_shift == 0 and y.toConst().isOdd()) {
- norm_shift = Limb.bit_count;
+ norm_shift = limb_bits;
}
x.shiftLeft(x.toConst(), norm_shift);
y.shiftLeft(y.toConst(), norm_shift);
@@ -820,7 +821,7 @@ pub const Mutable = struct {
mem.set(Limb, q.limbs[0..q.len], 0);
// 2.
- tmp.shiftLeft(y.toConst(), Limb.bit_count * (n - t));
+ tmp.shiftLeft(y.toConst(), limb_bits * (n - t));
while (x.toConst().order(tmp.toConst()) != .lt) {
q.limbs[n - t] += 1;
x.sub(x.toConst(), tmp.toConst());
@@ -833,7 +834,7 @@ pub const Mutable = struct {
if (x.limbs[i] == y.limbs[t]) {
q.limbs[i - t - 1] = maxInt(Limb);
} else {
- const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]);
+ const num = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t]));
q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z);
}
@@ -862,11 +863,11 @@ pub const Mutable = struct {
// 3.3
tmp.set(q.limbs[i - t - 1]);
tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator);
- tmp.shiftLeft(tmp.toConst(), Limb.bit_count * (i - t - 1));
+ tmp.shiftLeft(tmp.toConst(), limb_bits * (i - t - 1));
x.sub(x.toConst(), tmp.toConst());
if (!x.positive) {
- tmp.shiftLeft(y.toConst(), Limb.bit_count * (i - t - 1));
+ tmp.shiftLeft(y.toConst(), limb_bits * (i - t - 1));
x.add(x.toConst(), tmp.toConst());
q.limbs[i - t - 1] -= 1;
}
@@ -949,7 +950,7 @@ pub const Const = struct {
/// Returns the number of bits required to represent the absolute value of an integer.
pub fn bitCountAbs(self: Const) usize {
- return (self.limbs.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.limbs.len - 1]));
+ return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1]));
}
/// Returns the number of bits required to represent the integer in twos-complement form.
@@ -1019,10 +1020,10 @@ pub const Const = struct {
/// Returns an error if self cannot be narrowed into the requested type without truncation.
pub fn to(self: Const, comptime T: type) ConvertError!T {
switch (@typeInfo(T)) {
- .Int => {
- const UT = std.meta.Int(false, T.bit_count);
+ .Int => |info| {
+ const UT = std.meta.Int(false, info.bits);
- if (self.bitCountTwosComp() > T.bit_count) {
+ if (self.bitCountTwosComp() > info.bits) {
return error.TargetTooSmall;
}
@@ -1033,12 +1034,12 @@ pub const Const = struct {
} else {
for (self.limbs[0..self.limbs.len]) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
- r <<= Limb.bit_count;
+ r <<= limb_bits;
r |= limb;
}
}
- if (!T.is_signed) {
+ if (!info.is_signed) {
return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
} else {
if (self.positive) {
@@ -1149,7 +1150,7 @@ pub const Const = struct {
outer: for (self.limbs[0..self.limbs.len]) |limb| {
var shift: usize = 0;
- while (shift < Limb.bit_count) : (shift += base_shift) {
+ while (shift < limb_bits) : (shift += base_shift) {
const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
const ch = std.fmt.digitToChar(r, uppercase);
string[digits_len] = ch;
@@ -1295,7 +1296,7 @@ pub const Const = struct {
/// Memory is allocated as needed to ensure operations never overflow. The range
/// is bounded only by available memory.
pub const Managed = struct {
- pub const sign_bit: usize = 1 << (usize.bit_count - 1);
+ pub const sign_bit: usize = 1 << (@typeInfo(usize).Int.bits - 1);
/// Default number of limbs to allocate on creation of a `Managed`.
pub const default_capacity = 4;
@@ -1448,7 +1449,7 @@ pub const Managed = struct {
for (self.limbs[0..self.len()]) |limb| {
std.debug.warn("{x} ", .{limb});
}
- std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.positive });
+ std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.isPositive() });
}
/// Negate the sign.
@@ -1716,7 +1717,7 @@ pub const Managed = struct {
/// r = a << shift, in other words, r = a * 2^shift
pub fn shiftLeft(r: *Managed, a: Managed, shift: usize) !void {
- try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1);
+ try r.ensureCapacity(a.len() + (shift / limb_bits) + 1);
var m = r.toMutable();
m.shiftLeft(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@@ -1724,13 +1725,13 @@ pub const Managed = struct {
/// r = a >> shift
pub fn shiftRight(r: *Managed, a: Managed, shift: usize) !void {
- if (a.len() <= shift / Limb.bit_count) {
+ if (a.len() <= shift / limb_bits) {
r.metadata = 1;
r.limbs[0] = 0;
return;
}
- try r.ensureCapacity(a.len() - (shift / Limb.bit_count));
+ try r.ensureCapacity(a.len() - (shift / limb_bits));
var m = r.toMutable();
m.shiftRight(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@@ -2021,7 +2022,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
rem.* = 0;
for (a) |_, ri| {
const i = a.len - ri - 1;
- const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]);
+ const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]);
if (pdiv == 0) {
quo[i] = 0;
@@ -2042,10 +2043,10 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
- assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
+ assert(r.len >= a.len + (shift / limb_bits) + 1);
- const limb_shift = shift / Limb.bit_count + 1;
- const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+ const limb_shift = shift / limb_bits + 1;
+ const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);
var carry: Limb = 0;
var i: usize = 0;
@@ -2057,7 +2058,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
Limb,
src_digit,
- Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ limb_bits - @intCast(Limb, interior_limb_shift),
});
carry = (src_digit << interior_limb_shift);
}
@@ -2069,10 +2070,10 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
- assert(r.len >= a.len - (shift / Limb.bit_count));
+ assert(r.len >= a.len - (shift / limb_bits));
- const limb_shift = shift / Limb.bit_count;
- const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+ const limb_shift = shift / limb_bits;
+ const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);
var carry: Limb = 0;
var i: usize = 0;
@@ -2085,7 +2086,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
carry = @call(.{ .modifier = .always_inline }, math.shl, .{
Limb,
src_digit,
- Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ limb_bits - @intCast(Limb, interior_limb_shift),
});
}
}
@@ -2135,7 +2136,7 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable {
const A_is_positive = A >= 0;
const Au = @intCast(DoubleLimb, if (A < 0) -A else A);
storage[0] = @truncate(Limb, Au);
- storage[1] = @truncate(Limb, Au >> Limb.bit_count);
+ storage[1] = @truncate(Limb, Au >> limb_bits);
return .{
.limbs = storage[0..2],
.positive = A_is_positive,
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 5931767a82..9de93e94ac 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -23,13 +23,13 @@ test "big.int comptime_int set" {
var a = try Managed.initSet(testing.allocator, s);
defer a.deinit();
- const s_limb_count = 128 / Limb.bit_count;
+ const s_limb_count = 128 / @typeInfo(Limb).Int.bits;
comptime var i: usize = 0;
inline while (i < s_limb_count) : (i += 1) {
const result = @as(Limb, s & maxInt(Limb));
- s >>= Limb.bit_count / 2;
- s >>= Limb.bit_count / 2;
+ s >>= @typeInfo(Limb).Int.bits / 2;
+ s >>= @typeInfo(Limb).Int.bits / 2;
testing.expect(a.limbs[i] == result);
}
}
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 5b3c105718..d75a7b599c 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -136,7 +136,7 @@ pub const Rational = struct {
// Translated from golang.go/src/math/big/rat.go.
debug.assert(@typeInfo(T) == .Float);
- const UnsignedInt = std.meta.Int(false, T.bit_count);
+ const UnsignedInt = std.meta.Int(false, @typeInfo(T).Float.bits);
const f_bits = @bitCast(UnsignedInt, f);
const exponent_bits = math.floatExponentBits(T);
@@ -194,8 +194,8 @@ pub const Rational = struct {
// TODO: Indicate whether the result is not exact.
debug.assert(@typeInfo(T) == .Float);
- const fsize = T.bit_count;
- const BitReprType = std.meta.Int(false, T.bit_count);
+ const fsize = @typeInfo(T).Float.bits;
+ const BitReprType = std.meta.Int(false, fsize);
const msize = math.floatMantissaBits(T);
const msize1 = msize + 1;
@@ -475,16 +475,18 @@ pub const Rational = struct {
fn extractLowBits(a: Int, comptime T: type) T {
testing.expect(@typeInfo(T) == .Int);
- if (T.bit_count <= Limb.bit_count) {
+ const t_bits = @typeInfo(T).Int.bits;
+ const limb_bits = @typeInfo(Limb).Int.bits;
+ if (t_bits <= limb_bits) {
return @truncate(T, a.limbs[0]);
} else {
var r: T = 0;
comptime var i: usize = 0;
- // Remainder is always 0 since if T.bit_count >= Limb.bit_count -> Limb | T and both
+ // Remainder is always 0 since if t_bits >= limb_bits -> Limb | T and both
// are powers of two.
- inline while (i < T.bit_count / Limb.bit_count) : (i += 1) {
- r |= math.shl(T, a.limbs[i], i * Limb.bit_count);
+ inline while (i < t_bits / limb_bits) : (i += 1) {
+ r |= math.shl(T, a.limbs[i], i * limb_bits);
}
return r;
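The same `@typeInfo(T).Float.bits` spelling is the general way to name a float's bit-representation type after this change. A self-contained sketch, not taken from the patch:

const std = @import("std");

test "bit-representation type of a float via @typeInfo" {
    const T = f64;
    const BitReprType = std.meta.Int(false, @typeInfo(T).Float.bits);
    const repr = @bitCast(BitReprType, @as(T, 1.0));
    // IEEE-754 binary64: 1.0 is 0x3FF0000000000000.
    std.testing.expect(repr == 0x3FF0000000000000);
}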
diff --git a/lib/std/math/cos.zig b/lib/std/math/cos.zig
index 3d282c82e1..54d08d12ca 100644
--- a/lib/std/math/cos.zig
+++ b/lib/std/math/cos.zig
@@ -49,7 +49,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn cos_(comptime T: type, x_: T) T {
- const I = std.meta.Int(true, T.bit_count);
+ const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (math.isNan(x) or math.isInf(x)) {
diff --git a/lib/std/math/pow.zig b/lib/std/math/pow.zig
index 30b52acbda..66a371fc3e 100644
--- a/lib/std/math/pow.zig
+++ b/lib/std/math/pow.zig
@@ -128,7 +128,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
if (yf != 0 and x < 0) {
return math.nan(T);
}
- if (yi >= 1 << (T.bit_count - 1)) {
+ if (yi >= 1 << (@typeInfo(T).Float.bits - 1)) {
return math.exp(y * math.ln(x));
}
@@ -150,7 +150,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;
- var i = @floatToInt(std.meta.Int(true, T.bit_count), yi);
+ var i = @floatToInt(std.meta.Int(true, @typeInfo(T).Float.bits), yi);
while (i != 0) : (i >>= 1) {
const overflow_shift = math.floatExponentBits(T) + 1;
if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {
diff --git a/lib/std/math/sin.zig b/lib/std/math/sin.zig
index c7db4f8623..c4a330df5d 100644
--- a/lib/std/math/sin.zig
+++ b/lib/std/math/sin.zig
@@ -50,7 +50,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn sin_(comptime T: type, x_: T) T {
- const I = std.meta.Int(true, T.bit_count);
+ const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (x == 0 or math.isNan(x)) {
diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig
index 34851ca647..1c0b15c3de 100644
--- a/lib/std/math/sqrt.zig
+++ b/lib/std/math/sqrt.zig
@@ -36,10 +36,10 @@ pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) {
}
}
-fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
+fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, @typeInfo(T).Int.bits / 2) {
var op = value;
var res: T = 0;
- var one: T = 1 << (T.bit_count - 2);
+ var one: T = 1 << (@typeInfo(T).Int.bits - 2);
// "one" starts at the highest power of four <= than the argument.
while (one > op) {
@@ -55,7 +55,7 @@ fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
one >>= 2;
}
- const ResultType = std.meta.Int(false, T.bit_count / 2);
+ const ResultType = std.meta.Int(false, @typeInfo(T).Int.bits / 2);
return @intCast(ResultType, res);
}
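Behavior is unchanged: an n-bit unsigned input still produces an n/2-bit result; only the width query moved into `@typeInfo`. A quick check of that contract, assuming the integer path of `std.math.sqrt` shown above:

const std = @import("std");
const math = std.math;

test "integer sqrt result type is half the input width" {
    const r = math.sqrt(@as(u32, 1 << 30));
    std.testing.expect(@TypeOf(r) == u16);
    std.testing.expect(r == 1 << 15); // sqrt(2^30) == 2^15
}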
diff --git a/lib/std/math/tan.zig b/lib/std/math/tan.zig
index 5e5a80e15d..358eb8a380 100644
--- a/lib/std/math/tan.zig
+++ b/lib/std/math/tan.zig
@@ -43,7 +43,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn tan_(comptime T: type, x_: T) T {
- const I = std.meta.Int(true, T.bit_count);
+ const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (x == 0 or math.isNan(x)) {
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 71190069a8..b10c318635 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -949,7 +949,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
-pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
+pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @ptrCast(*align(1) const T, bytes).*;
}
@@ -957,7 +957,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
-pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
+pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @byteSwap(T, readIntNative(T, bytes));
}
@@ -971,18 +971,18 @@ pub const readIntBig = switch (builtin.endian) {
.Big => readIntNative,
};
-/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
+/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
- const n = @divExact(T.bit_count, 8);
+ const n = @divExact(@typeInfo(T).Int.bits, 8);
assert(bytes.len >= n);
return readIntNative(T, bytes[0..n]);
}
-/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
+/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
@@ -1003,7 +1003,7 @@ pub const readIntSliceBig = switch (builtin.endian) {
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
-pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, endian: builtin.Endian) T {
+pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8, endian: builtin.Endian) T {
if (endian == builtin.endian) {
return readIntNative(T, bytes);
} else {
@@ -1011,11 +1011,11 @@ pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, en
}
}
-/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
+/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T {
- const n = @divExact(T.bit_count, 8);
+ const n = @divExact(@typeInfo(T).Int.bits, 8);
assert(bytes.len >= n);
return readInt(T, bytes[0..n], endian);
}
@@ -1060,7 +1060,7 @@ test "readIntBig and readIntLittle" {
/// accepts any integer bit width.
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
-pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: T) void {
+pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void {
@ptrCast(*align(1) T, buf).* = value;
}
@@ -1068,7 +1068,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value:
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @byteSwap first.
-pub fn writeIntForeign(comptime T: type, buf: *[@divExact(T.bit_count, 8)]u8, value: T) void {
+pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
writeIntNative(T, buf, @byteSwap(T, value));
}
@@ -1085,7 +1085,7 @@ pub const writeIntBig = switch (builtin.endian) {
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
-pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: T, endian: builtin.Endian) void {
+pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T, endian: builtin.Endian) void {
if (endian == builtin.endian) {
return writeIntNative(T, buffer, value);
} else {
@@ -1094,19 +1094,19 @@ pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value:
}
/// Writes a twos-complement little-endian integer to memory.
-/// Asserts that buf.len >= T.bit_count / 8.
+/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer after writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntLittle
/// instead.
pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
- assert(buffer.len >= @divExact(T.bit_count, 8));
+ assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));
- if (T.bit_count == 0)
+ if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);
// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
- const uint = std.meta.Int(false, T.bit_count);
+ const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
for (buffer) |*b| {
b.* = @truncate(u8, bits);
@@ -1115,18 +1115,18 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
}
/// Writes a twos-complement big-endian integer to memory.
-/// Asserts that buffer.len >= T.bit_count / 8.
+/// Asserts that buffer.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer before writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntBig instead.
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
- assert(buffer.len >= @divExact(T.bit_count, 8));
+ assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));
- if (T.bit_count == 0)
+ if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);
// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
- const uint = std.meta.Int(false, T.bit_count);
+ const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
var index: usize = buffer.len;
while (index != 0) {
@@ -1147,13 +1147,13 @@ pub const writeIntSliceForeign = switch (builtin.endian) {
};
/// Writes a twos-complement integer to memory, with the specified endianness.
-/// Asserts that buf.len >= T.bit_count / 8.
+/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be evenly divisible by 8.
/// Any extra bytes in buffer not part of the integer are set to zero, with
/// respect to endianness. To avoid the branch to check for extra buffer bytes,
/// use writeInt instead.
pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: builtin.Endian) void {
- comptime assert(T.bit_count % 8 == 0);
+ comptime assert(@typeInfo(T).Int.bits % 8 == 0);
return switch (endian) {
.Little => writeIntSliceLittle(T, buffer, value),
.Big => writeIntSliceBig(T, buffer, value),
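The doc-comment updates above are cosmetic, but the `@divExact` in the signatures is load-bearing: the pointer-to-array length is computed from the type's bit count at compile time. A usage sketch:

const std = @import("std");
const mem = std.mem;

test "readInt/writeInt buffer length is @divExact(bits, 8)" {
    // A u32 is exactly 4 bytes; a [4]u8 satisfies the
    // comptime-computed pointer-to-array parameter.
    var buf: [4]u8 = undefined;
    mem.writeInt(u32, &buf, 0xDEADBEEF, .Big);
    std.testing.expect(buf[0] == 0xDE and buf[3] == 0xEF);
    std.testing.expect(mem.readInt(u32, &buf, .Big) == 0xDEADBEEF);
}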
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index bb59de2a7e..326a73b915 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -159,7 +159,7 @@ fn moveBytes(
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
- if (@sizeOf(T) == 0) return &(T{});
+ if (@sizeOf(T) == 0) return @as(*T, undefined);
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
return &slice[0];
}
@@ -167,11 +167,11 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: anytype) void {
- const T = @TypeOf(ptr).Child;
+ const info = @typeInfo(@TypeOf(ptr)).Pointer;
+ const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
- _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
+ _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress());
}
/// Allocates an array of `n` items of type `T` and sets all the
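With this change, `create` of a zero-sized type returns `@as(*T, undefined)` instead of the address of a stack temporary, and `destroy` pulls the child type and alignment from one `@typeInfo` lookup. A round-trip sketch, assuming `std.testing.allocator` is available in this tree:

const std = @import("std");

test "create/destroy of a zero-sized type" {
    const Empty = struct {};
    const p = try std.testing.allocator.create(Empty);
    // No memory was allocated; destroy is a no-op for @sizeOf(T) == 0.
    std.testing.allocator.destroy(p);
}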
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 08ac67aabf..181bf4930d 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4526,7 +4526,7 @@ pub fn res_mkquery(
// Make a reasonably unpredictable id
var ts: timespec = undefined;
clock_gettime(CLOCK_REALTIME, &ts) catch {};
- const UInt = std.meta.Int(false, @TypeOf(ts.tv_nsec).bit_count);
+ const UInt = std.meta.Int(false, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
const unsec = @bitCast(UInt, ts.tv_nsec);
const id = @truncate(u32, unsec + unsec / 65536);
q[0] = @truncate(u8, id / 256);
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index 1e12a278f3..6d85d06236 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -846,7 +846,7 @@ pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
-pub const empty_sigset = [_]u32{0} ** sigset_t.len;
+pub const empty_sigset = [_]u32{0} ** @typeInfo(sigset_t).Array.len;
pub const signalfd_siginfo = extern struct {
signo: u32,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 8452d69d8f..8f697fb967 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -829,17 +829,19 @@ pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return 0;
}
+const usize_bits = @typeInfo(usize).Int.bits;
+
pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
// shift in musl: s&8*sizeof *set->__bits-1
- const shift = @intCast(u5, s & (usize.bit_count - 1));
+ const shift = @intCast(u5, s & (usize_bits - 1));
const val = @intCast(u32, 1) << shift;
- (set.*)[@intCast(usize, s) / usize.bit_count] |= val;
+ (set.*)[@intCast(usize, s) / usize_bits] |= val;
}
pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
- return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0;
+ return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0;
}
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
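Both sigset helpers only need a word index and a bit index; hoisting `usize_bits` keeps those expressions short. Worked numbers for the arithmetic (my own example):

const std = @import("std");

test "sigaddset word/bit arithmetic" {
    const usize_bits = @typeInfo(usize).Int.bits;
    // Signal 9 (SIGKILL) is stored as s = 8: word s / usize_bits,
    // bit s & (usize_bits - 1); on 64-bit that is word 0, bit 8.
    const s: usize = 9 - 1;
    std.testing.expect(s / usize_bits == 0);
    std.testing.expect(s & (usize_bits - 1) == 8);
}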
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index cfc212d15a..ac21b6ffc9 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -12,7 +12,7 @@ pub const SOCKET_ERROR = -1;
pub const WSADESCRIPTION_LEN = 256;
pub const WSASYS_STATUS_LEN = 128;
-pub const WSADATA = if (usize.bit_count == u64.bit_count)
+pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64))
extern struct {
wVersion: WORD,
wHighVersion: WORD,
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index e8c61f859d..91e22307d8 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -636,7 +636,7 @@ const MsfStream = struct {
blocks: []u32 = undefined,
block_size: u32 = undefined,
- pub const Error = @TypeOf(read).ReturnType.ErrorSet;
+ pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set;
fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
const stream = MsfStream{
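The removed `ReturnType` and `ErrorSet` fields are both replaced by `@typeInfo` walks. The new spelling in isolation:

const std = @import("std");

fn read() anyerror!usize {
    return 0;
}

test "error set of a function's return type via @typeInfo" {
    const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set;
    std.testing.expect(Error == anyerror);
}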
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index 7988efffc9..7e05592869 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -51,8 +51,9 @@ pub const Random = struct {
/// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: *Random, comptime T: type) T {
- const UnsignedT = std.meta.Int(false, T.bit_count);
- const ByteAlignedT = std.meta.Int(false, @divTrunc(T.bit_count + 7, 8) * 8);
+ const bits = @typeInfo(T).Int.bits;
+ const UnsignedT = std.meta.Int(false, bits);
+ const ByteAlignedT = std.meta.Int(false, @divTrunc(bits + 7, 8) * 8);
var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@@ -68,10 +69,11 @@ pub const Random = struct {
/// Constant-time implementation of `uintLessThan`.
/// The results of this function may be biased.
pub fn uintLessThanBiased(r: *Random, comptime T: type, less_than: T) T {
- comptime assert(T.is_signed == false);
- comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
+ comptime assert(@typeInfo(T).Int.is_signed == false);
+ const bits = @typeInfo(T).Int.bits;
+ comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
- if (T.bit_count <= 32) {
+ if (bits <= 32) {
return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
} else {
return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));
@@ -87,13 +89,15 @@ pub const Random = struct {
/// this function is guaranteed to return.
/// If you need deterministic runtime bounds, use `uintLessThanBiased`.
pub fn uintLessThan(r: *Random, comptime T: type, less_than: T) T {
- comptime assert(T.is_signed == false);
- comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
+ comptime assert(@typeInfo(T).Int.is_signed == false);
+ const bits = @typeInfo(T).Int.bits;
+ comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
// Small is typically u32
- const Small = std.meta.Int(false, @divTrunc(T.bit_count + 31, 32) * 32);
+ const small_bits = @divTrunc(bits + 31, 32) * 32;
+ const Small = std.meta.Int(false, small_bits);
// Large is typically u64
- const Large = std.meta.Int(false, Small.bit_count * 2);
+ const Large = std.meta.Int(false, small_bits * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
@@ -105,7 +109,7 @@ pub const Random = struct {
// TODO: workaround for https://github.com/ziglang/zig/issues/1770
// should be:
// var t: Small = -%less_than;
- var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, Small.bit_count), @as(Small, less_than)));
+ var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, small_bits), @as(Small, less_than)));
if (t >= less_than) {
t -= less_than;
@@ -119,13 +123,13 @@ pub const Random = struct {
l = @truncate(Small, m);
}
}
- return @intCast(T, m >> Small.bit_count);
+ return @intCast(T, m >> small_bits);
}
/// Constant-time implementation of `uintAtMost`.
/// The results of this function may be biased.
pub fn uintAtMostBiased(r: *Random, comptime T: type, at_most: T) T {
- assert(T.is_signed == false);
+ assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@@ -137,7 +141,7 @@ pub const Random = struct {
/// See `uintLessThan`, which this function uses in most cases,
/// for commentary on the runtime of this function.
pub fn uintAtMost(r: *Random, comptime T: type, at_most: T) T {
- assert(T.is_signed == false);
+ assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@@ -149,9 +153,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeLessThanBiased(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
@@ -167,9 +172,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeLessThan(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
@@ -184,9 +190,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeAtMostBiased(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
@@ -202,9 +209,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeAtMost(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
@@ -280,14 +288,15 @@ pub const Random = struct {
/// into an integer 0 <= result < less_than.
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
- comptime assert(T.is_signed == false);
- const T2 = std.meta.Int(false, T.bit_count * 2);
+ comptime assert(@typeInfo(T).Int.is_signed == false);
+ const bits = @typeInfo(T).Int.bits;
+ const T2 = std.meta.Int(false, bits * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
// "Integer Multiplication (Biased)"
var m: T2 = @as(T2, random_int) * @as(T2, less_than);
- return @intCast(T, m >> T.bit_count);
+ return @intCast(T, m >> bits);
}
const SequentialPrng = struct {
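`limitRangeBiased` is the "Integer Multiplication (Biased)" trick from the pcg-random post cited above: widen, multiply by the bound, keep the high half. Worked numbers at u8 width (my own example):

const std = @import("std");

test "widening multiply maps a full-range int into [0, less_than)" {
    const T = u8;
    const bits = @typeInfo(T).Int.bits;
    const T2 = std.meta.Int(false, bits * 2);
    const random_int: T = 200;
    const less_than: T = 10;
    const m: T2 = @as(T2, random_int) * @as(T2, less_than);
    // 200 * 10 = 2000; 2000 >> 8 = 7, which lies in [0, 10).
    std.testing.expect(@intCast(T, m >> bits) == 7);
}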
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index 46d3b0b615..3ab74a11a2 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -133,7 +133,7 @@ pub fn main() !void {
}
fn runBuild(builder: *Builder) anyerror!void {
- switch (@typeInfo(@TypeOf(root.build).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
.Void => root.build(builder),
.ErrorUnion => try root.build(builder),
else => @compileError("expected return type of build to be 'void' or '!void'"),
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index d5903ece02..ce8d1c29cc 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -516,11 +516,12 @@ export fn roundf(a: f32) f32 {
fn generic_fmod(comptime T: type, x: T, y: T) T {
@setRuntimeSafety(false);
- const uint = std.meta.Int(false, T.bit_count);
+ const bits = @typeInfo(T).Float.bits;
+ const uint = std.meta.Int(false, bits);
const log2uint = math.Log2Int(uint);
const digits = if (T == f32) 23 else 52;
const exp_bits = if (T == f32) 9 else 12;
- const bits_minus_1 = T.bit_count - 1;
+ const bits_minus_1 = bits - 1;
const mask = if (T == f32) 0xff else 0x7ff;
var ux = @bitCast(uint, x);
var uy = @bitCast(uint, y);
diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/std/special/compiler_rt/addXf3.zig
index 6dd0faaebb..da1238010e 100644
--- a/lib/std/special/compiler_rt/addXf3.zig
+++ b/lib/std/special/compiler_rt/addXf3.zig
@@ -59,23 +59,25 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
+ const bits = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
- const shift = @clz(std.meta.Int(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
+ const shift = @clz(std.meta.Int(false, bits), significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(S, shift);
return 1 - shift;
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn addXf3(comptime T: type, a: T, b: T) T {
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+ const bits = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
- const typeWidth = T.bit_count;
+ const typeWidth = bits;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@@ -187,7 +189,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
- const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, T.bit_count), implicitBit << 3));
+ const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, bits), implicitBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}
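`normalize` shifts a subnormal significand left until the implicit-bit position is occupied and reports the exponent adjustment. A worked instance at f32, following the definitions above:

const std = @import("std");

test "normalizing the smallest f32 subnormal" {
    const T = f32;
    const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
    const significandBits = std.math.floatMantissaBits(T);
    const implicitBit = @as(Z, 1) << significandBits;
    var significand: Z = 1; // smallest subnormal payload
    const shift = @clz(Z, significand) - @clz(Z, implicitBit);
    significand <<= @intCast(u5, shift);
    std.testing.expect(significand == implicitBit);
    std.testing.expect(shift == 23); // exponent adjustment is 1 - 23
}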
diff --git a/lib/std/special/compiler_rt/aulldiv.zig b/lib/std/special/compiler_rt/aulldiv.zig
index cf9b26c5a6..321ff288bb 100644
--- a/lib/std/special/compiler_rt/aulldiv.zig
+++ b/lib/std/special/compiler_rt/aulldiv.zig
@@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i64.bit_count - 1);
- const s_b = b >> (i64.bit_count - 1);
+ const s_a = a >> (64 - 1);
+ const s_b = b >> (64 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
diff --git a/lib/std/special/compiler_rt/aullrem.zig b/lib/std/special/compiler_rt/aullrem.zig
index 7c981cc088..a14eb99be3 100644
--- a/lib/std/special/compiler_rt/aullrem.zig
+++ b/lib/std/special/compiler_rt/aullrem.zig
@@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i64.bit_count - 1);
- const s_b = b >> (i64.bit_count - 1);
+ const s_a = a >> (64 - 1);
+ const s_b = b >> (64 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
diff --git a/lib/std/special/compiler_rt/compareXf2.zig b/lib/std/special/compiler_rt/compareXf2.zig
index f50dc67474..05af1e533c 100644
--- a/lib/std/special/compiler_rt/compareXf2.zig
+++ b/lib/std/special/compiler_rt/compareXf2.zig
@@ -27,8 +27,9 @@ const GE = extern enum(i32) {
pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);
- const srep_t = std.meta.Int(true, T.bit_count);
- const rep_t = std.meta.Int(false, T.bit_count);
+ const bits = @typeInfo(T).Float.bits;
+ const srep_t = std.meta.Int(true, bits);
+ const rep_t = std.meta.Int(false, bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@@ -73,7 +74,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
- const rep_t = std.meta.Int(false, T.bit_count);
+ const rep_t = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
diff --git a/lib/std/special/compiler_rt/divdf3.zig b/lib/std/special/compiler_rt/divdf3.zig
index ad72f96057..11ede3af66 100644
--- a/lib/std/special/compiler_rt/divdf3.zig
+++ b/lib/std/special/compiler_rt/divdf3.zig
@@ -12,10 +12,9 @@ const builtin = @import("builtin");
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, f64.bit_count);
- const SignedZ = std.meta.Int(true, f64.bit_count);
+ const Z = std.meta.Int(false, 64);
+ const SignedZ = std.meta.Int(true, 64);
- const typeWidth = f64.bit_count;
const significandBits = std.math.floatMantissaBits(f64);
const exponentBits = std.math.floatExponentBits(f64);
@@ -317,9 +316,9 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
-pub fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
+pub fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
diff --git a/lib/std/special/compiler_rt/divsf3.zig b/lib/std/special/compiler_rt/divsf3.zig
index 80af806eb1..13f4d8e68d 100644
--- a/lib/std/special/compiler_rt/divsf3.zig
+++ b/lib/std/special/compiler_rt/divsf3.zig
@@ -12,9 +12,8 @@ const builtin = @import("builtin");
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, f32.bit_count);
+ const Z = std.meta.Int(false, 32);
- const typeWidth = f32.bit_count;
const significandBits = std.math.floatMantissaBits(f32);
const exponentBits = std.math.floatExponentBits(f32);
@@ -190,9 +189,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
-fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
diff --git a/lib/std/special/compiler_rt/divtf3.zig b/lib/std/special/compiler_rt/divtf3.zig
index f6f7c1bf7d..0582400ce3 100644
--- a/lib/std/special/compiler_rt/divtf3.zig
+++ b/lib/std/special/compiler_rt/divtf3.zig
@@ -11,10 +11,9 @@ const wideMultiply = @import("divdf3.zig").wideMultiply;
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, f128.bit_count);
- const SignedZ = std.meta.Int(true, f128.bit_count);
+ const Z = std.meta.Int(false, 128);
+ const SignedZ = std.meta.Int(true, 128);
- const typeWidth = f128.bit_count;
const significandBits = std.math.floatMantissaBits(f128);
const exponentBits = std.math.floatExponentBits(f128);
diff --git a/lib/std/special/compiler_rt/divti3.zig b/lib/std/special/compiler_rt/divti3.zig
index 4b7d459991..a065111510 100644
--- a/lib/std/special/compiler_rt/divti3.zig
+++ b/lib/std/special/compiler_rt/divti3.zig
@@ -9,8 +9,8 @@ const builtin = @import("builtin");
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i128.bit_count - 1);
- const s_b = b >> (i128.bit_count - 1);
+ const s_a = a >> (128 - 1);
+ const s_b = b >> (128 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
diff --git a/lib/std/special/compiler_rt/fixint.zig b/lib/std/special/compiler_rt/fixint.zig
index 0bf0c8be1e..1512641be4 100644
--- a/lib/std/special/compiler_rt/fixint.zig
+++ b/lib/std/special/compiler_rt/fixint.zig
@@ -28,7 +28,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
else => unreachable,
};
- const typeWidth = rep_t.bit_count;
+ const typeWidth = @typeInfo(rep_t).Int.bits;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
@@ -50,12 +50,13 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
if (exponent < 0) return 0;
// The unsigned result needs to be large enough to handle a fixint_t or rep_t
- const fixuint_t = std.meta.Int(false, fixint_t.bit_count);
- const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t;
+ const fixint_bits = @typeInfo(fixint_t).Int.bits;
+ const fixuint_t = std.meta.Int(false, fixint_bits);
+ const UintResultType = if (fixint_bits > typeWidth) fixuint_t else rep_t;
var uint_result: UintResultType = undefined;
// If the value is too large for the integer type, saturate.
- if (@intCast(usize, exponent) >= fixint_t.bit_count) {
+ if (@intCast(usize, exponent) >= fixint_bits) {
return if (negative) @as(fixint_t, minInt(fixint_t)) else @as(fixint_t, maxInt(fixint_t));
}
diff --git a/lib/std/special/compiler_rt/fixuint.zig b/lib/std/special/compiler_rt/fixuint.zig
index 01eb03baa5..3f2d661244 100644
--- a/lib/std/special/compiler_rt/fixuint.zig
+++ b/lib/std/special/compiler_rt/fixuint.zig
@@ -15,14 +15,14 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
f128 => u128,
else => unreachable,
};
- const srep_t = @import("std").meta.Int(true, rep_t.bit_count);
+ const typeWidth = @typeInfo(rep_t).Int.bits;
+ const srep_t = @import("std").meta.Int(true, typeWidth);
const significandBits = switch (fp_t) {
f32 => 23,
f64 => 52,
f128 => 112,
else => unreachable,
};
- const typeWidth = rep_t.bit_count;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
@@ -44,7 +44,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
if (sign == -1 or exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
- if (@intCast(c_uint, exponent) >= fixuint_t.bit_count) return ~@as(fixuint_t, 0);
+ if (@intCast(c_uint, exponent) >= @typeInfo(fixuint_t).Int.bits) return ~@as(fixuint_t, 0);
// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.
diff --git a/lib/std/special/compiler_rt/floatXisf.zig b/lib/std/special/compiler_rt/floatXisf.zig
index 650b948396..134a1eba61 100644
--- a/lib/std/special/compiler_rt/floatXisf.zig
+++ b/lib/std/special/compiler_rt/floatXisf.zig
@@ -12,15 +12,16 @@ const FLT_MANT_DIG = 24;
fn __floatXisf(comptime T: type, arg: T) f32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+ const bits = @typeInfo(T).Int.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
if (arg == 0) {
return @as(f32, 0.0);
}
var ai = arg;
- const N: u32 = T.bit_count;
+ const N: u32 = bits;
const si = ai >> @intCast(S, (N - 1));
ai = ((ai ^ si) -% si);
var a = @bitCast(Z, ai);
@@ -66,7 +67,7 @@ fn __floatXisf(comptime T: type, arg: T) f32 {
// a is now rounded to FLT_MANT_DIG bits
}
- const s = @bitCast(Z, arg) >> (T.bit_count - 32);
+ const s = @bitCast(Z, arg) >> (@typeInfo(T).Int.bits - 32);
const r = (@intCast(u32, s) & 0x80000000) | // sign
(@intCast(u32, (e + 127)) << 23) | // exponent
(@truncate(u32, a) & 0x007fffff); // mantissa-high
diff --git a/lib/std/special/compiler_rt/floatsiXf.zig b/lib/std/special/compiler_rt/floatsiXf.zig
index 75db3d7040..b6ce36b6f7 100644
--- a/lib/std/special/compiler_rt/floatsiXf.zig
+++ b/lib/std/special/compiler_rt/floatsiXf.zig
@@ -10,8 +10,9 @@ const maxInt = std.math.maxInt;
fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+ const bits = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
if (a == 0) {
return @as(T, 0.0);
@@ -22,7 +23,7 @@ fn floatsiXf(comptime T: type, a: i32) T {
const exponentBias = ((1 << exponentBits - 1) - 1);
const implicitBit = @as(Z, 1) << significandBits;
- const signBit = @as(Z, 1 << Z.bit_count - 1);
+ const signBit = @as(Z, 1 << bits - 1);
const sign = a >> 31;
// Take absolute value of a via abs(x) = (x^(x >> 31)) - (x >> 31).
diff --git a/lib/std/special/compiler_rt/floatundisf.zig b/lib/std/special/compiler_rt/floatundisf.zig
index b580ec91fd..67cd53b21c 100644
--- a/lib/std/special/compiler_rt/floatundisf.zig
+++ b/lib/std/special/compiler_rt/floatundisf.zig
@@ -15,7 +15,7 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 {
if (arg == 0) return 0;
var a = arg;
- const N: usize = @TypeOf(a).bit_count;
+ const N: usize = @typeInfo(@TypeOf(a)).Int.bits;
// Number of significant digits
const sd = N - @clz(u64, a);
// 8-bit exponent
diff --git a/lib/std/special/compiler_rt/floatunditf.zig b/lib/std/special/compiler_rt/floatunditf.zig
index 90191c6388..014a479c5f 100644
--- a/lib/std/special/compiler_rt/floatunditf.zig
+++ b/lib/std/special/compiler_rt/floatunditf.zig
@@ -19,7 +19,7 @@ pub fn __floatunditf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
- const exp: u128 = (u64.bit_count - 1) - @clz(u64, a);
+ const exp: u128 = (64 - 1) - @clz(u64, a);
const shift: u7 = mantissa_bits - @intCast(u7, exp);
var result: u128 = (@intCast(u128, a) << shift) ^ implicit_bit;
diff --git a/lib/std/special/compiler_rt/floatunsitf.zig b/lib/std/special/compiler_rt/floatunsitf.zig
index ceb55f12c8..f59446abac 100644
--- a/lib/std/special/compiler_rt/floatunsitf.zig
+++ b/lib/std/special/compiler_rt/floatunsitf.zig
@@ -19,7 +19,7 @@ pub fn __floatunsitf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
- const exp = (u64.bit_count - 1) - @clz(u64, a);
+ const exp = (64 - 1) - @clz(u64, a);
const shift = mantissa_bits - @intCast(u7, exp);
// TODO(#1148): @bitCast alignment error
diff --git a/lib/std/special/compiler_rt/int.zig b/lib/std/special/compiler_rt/int.zig
index 141c4e52c1..1fb2c263e1 100644
--- a/lib/std/special/compiler_rt/int.zig
+++ b/lib/std/special/compiler_rt/int.zig
@@ -219,7 +219,7 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) void {
pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
@setRuntimeSafety(builtin.is_test);
- const n_uword_bits: c_uint = u32.bit_count;
+ const n_uword_bits: c_uint = 32;
// special cases
if (d == 0) return 0; // ?!
if (n == 0) return 0;
diff --git a/lib/std/special/compiler_rt/modti3.zig b/lib/std/special/compiler_rt/modti3.zig
index 1f859c2329..9c3de44395 100644
--- a/lib/std/special/compiler_rt/modti3.zig
+++ b/lib/std/special/compiler_rt/modti3.zig
@@ -14,8 +14,8 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i128.bit_count - 1); // s = a < 0 ? -1 : 0
- const s_b = b >> (i128.bit_count - 1); // s = b < 0 ? -1 : 0
+ const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
+ const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0
const an = (a ^ s_a) -% s_a; // negate if s == -1
const bn = (b ^ s_b) -% s_b; // negate if s == -1
diff --git a/lib/std/special/compiler_rt/mulXf3.zig b/lib/std/special/compiler_rt/mulXf3.zig
index b6984ebbb6..40b5b4f658 100644
--- a/lib/std/special/compiler_rt/mulXf3.zig
+++ b/lib/std/special/compiler_rt/mulXf3.zig
@@ -33,9 +33,9 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const typeWidth = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, typeWidth);
- const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@@ -269,9 +269,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
-fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
@@ -282,7 +282,7 @@ fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i
fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
@setRuntimeSafety(builtin.is_test);
- const typeWidth = Z.bit_count;
+ const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
if (count < typeWidth) {
const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count));
diff --git a/lib/std/special/compiler_rt/mulodi4.zig b/lib/std/special/compiler_rt/mulodi4.zig
index b05931e937..fab345fa47 100644
--- a/lib/std/special/compiler_rt/mulodi4.zig
+++ b/lib/std/special/compiler_rt/mulodi4.zig
@@ -11,7 +11,7 @@ const minInt = std.math.minInt;
pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
@setRuntimeSafety(builtin.is_test);
- const min = @bitCast(i64, @as(u64, 1 << (i64.bit_count - 1)));
+ const min = @bitCast(i64, @as(u64, 1 << (64 - 1)));
const max = ~min;
overflow.* = 0;
diff --git a/lib/std/special/compiler_rt/muloti4.zig b/lib/std/special/compiler_rt/muloti4.zig
index 4beafa3e15..b1ad82da29 100644
--- a/lib/std/special/compiler_rt/muloti4.zig
+++ b/lib/std/special/compiler_rt/muloti4.zig
@@ -9,7 +9,7 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
- const min = @bitCast(i128, @as(u128, 1 << (i128.bit_count - 1)));
+ const min = @bitCast(i128, @as(u128, 1 << (128 - 1)));
const max = ~min;
overflow.* = 0;
@@ -27,9 +27,9 @@ pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
return r;
}
- const sa = a >> (i128.bit_count - 1);
+ const sa = a >> (128 - 1);
const abs_a = (a ^ sa) -% sa;
- const sb = b >> (i128.bit_count - 1);
+ const sb = b >> (128 - 1);
const abs_b = (b ^ sb) -% sb;
if (abs_a < 2 or abs_b < 2) {
diff --git a/lib/std/special/compiler_rt/negXf2.zig b/lib/std/special/compiler_rt/negXf2.zig
index 11f9e401e9..ae01e10776 100644
--- a/lib/std/special/compiler_rt/negXf2.zig
+++ b/lib/std/special/compiler_rt/negXf2.zig
@@ -24,9 +24,8 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
}
fn negXf2(comptime T: type, a: T) T {
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
- const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
diff --git a/lib/std/special/compiler_rt/shift.zig b/lib/std/special/compiler_rt/shift.zig
index 1609cb115c..acb14c969a 100644
--- a/lib/std/special/compiler_rt/shift.zig
+++ b/lib/std/special/compiler_rt/shift.zig
@@ -9,8 +9,9 @@ const Log2Int = std.math.Log2Int;
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
- pub const HalfTU = std.meta.Int(false, @divExact(T.bit_count, 2));
- pub const HalfTS = std.meta.Int(true, @divExact(T.bit_count, 2));
+ pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
+ pub const HalfTU = std.meta.Int(false, bits);
+ pub const HalfTS = std.meta.Int(true, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
@@ -30,15 +31,15 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
- if (b >= dwords.HalfT.bit_count) {
+ if (b >= dwords.bits) {
output.s.low = 0;
- output.s.high = input.s.low << @intCast(S, b - dwords.HalfT.bit_count);
+ output.s.high = input.s.low << @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.low = input.s.low << @intCast(S, b);
output.s.high = input.s.high << @intCast(S, b);
- output.s.high |= input.s.low >> @intCast(S, dwords.HalfT.bit_count - b);
+ output.s.high |= input.s.low >> @intCast(S, dwords.bits - b);
}
return output.all;
@@ -53,14 +54,14 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
- if (b >= dwords.HalfT.bit_count) {
- output.s.high = input.s.high >> (dwords.HalfT.bit_count - 1);
- output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
+ if (b >= dwords.bits) {
+ output.s.high = input.s.high >> (dwords.bits - 1);
+ output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
- output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
+ output.s.low = input.s.high << @intCast(S, dwords.bits - b);
// Avoid sign-extension here
output.s.low |= @bitCast(
dwords.HalfT,
@@ -80,14 +81,14 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
- if (b >= dwords.HalfT.bit_count) {
+ if (b >= dwords.bits) {
output.s.high = 0;
- output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
+ output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
- output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
+ output.s.low = input.s.high << @intCast(S, dwords.bits - b);
output.s.low |= input.s.low >> @intCast(S, b);
}
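All three helpers share one shape: shifting a 2n-bit value by b is either a pure cross-half move (b >= n) or two half shifts stitched with a carry. Worked bytes for the small-shift case, with n = 8 standing in for `dwords.bits` (my own example):

const std = @import("std");

test "double-word shift, small-shift case" {
    const n = 8; // stand-in for dwords.bits
    const lo: u8 = 0b1000_0001;
    const hi: u8 = 0b0000_0000;
    const b: u3 = 1;
    const carry = lo >> @intCast(u3, n - @as(u8, b));
    std.testing.expect(((hi << b) | carry) == 0b0000_0001);
    std.testing.expect((lo << b) == 0b0000_0010);
}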
diff --git a/lib/std/special/compiler_rt/truncXfYf2.zig b/lib/std/special/compiler_rt/truncXfYf2.zig
index e096e7e4f0..b5823607ea 100644
--- a/lib/std/special/compiler_rt/truncXfYf2.zig
+++ b/lib/std/special/compiler_rt/truncXfYf2.zig
@@ -50,7 +50,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
- const srcBits = src_t.bit_count;
+ const srcBits = @typeInfo(src_t).Float.bits;
const srcExpBits = srcBits - srcSigBits - 1;
const srcInfExp = (1 << srcExpBits) - 1;
const srcExpBias = srcInfExp >> 1;
@@ -65,7 +65,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const srcQNaN = 1 << (srcSigBits - 1);
const srcNaNCode = srcQNaN - 1;
- const dstBits = dst_t.bit_count;
+ const dstBits = @typeInfo(dst_t).Float.bits;
const dstExpBits = dstBits - dstSigBits - 1;
const dstInfExp = (1 << dstExpBits) - 1;
const dstExpBias = dstInfExp >> 1;
diff --git a/lib/std/special/compiler_rt/udivmod.zig b/lib/std/special/compiler_rt/udivmod.zig
index 2836f34c85..f8c7e1298b 100644
--- a/lib/std/special/compiler_rt/udivmod.zig
+++ b/lib/std/special/compiler_rt/udivmod.zig
@@ -15,8 +15,10 @@ const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
- const SingleInt = @import("std").meta.Int(false, @divExact(DoubleInt.bit_count, 2));
- const SignedDoubleInt = @import("std").meta.Int(true, DoubleInt.bit_count);
+ const double_int_bits = @typeInfo(DoubleInt).Int.bits;
+ const single_int_bits = @divExact(double_int_bits, 2);
+ const SingleInt = @import("std").meta.Int(false, single_int_bits);
+ const SignedDoubleInt = @import("std").meta.Int(true, double_int_bits);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
@@ -82,21 +84,21 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// K 0
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
- // 0 <= sr <= SingleInt.bit_count - 2 or sr large
- if (sr > SingleInt.bit_count - 2) {
+ // 0 <= sr <= single_int_bits - 2 or sr large
+ if (sr > single_int_bits - 2) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
- // 1 <= sr <= SingleInt.bit_count - 1
- // q.all = a << (DoubleInt.bit_count - sr);
+ // 1 <= sr <= single_int_bits - 1
+ // q.all = a << (double_int_bits - sr);
q[low] = 0;
- q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
// r.all = a >> sr;
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
- r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// d[low] != 0
if (d[high] == 0) {
@@ -113,74 +115,74 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
}
sr = @ctz(SingleInt, d[low]);
q[high] = n[high] >> @intCast(Log2SingleInt, sr);
- q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ q[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
// 0 K
- sr = 1 + SingleInt.bit_count + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
- // 2 <= sr <= DoubleInt.bit_count - 1
- // q.all = a << (DoubleInt.bit_count - sr);
+ sr = 1 + single_int_bits + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
+ // 2 <= sr <= double_int_bits - 1
+ // q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
- if (sr == SingleInt.bit_count) {
+ if (sr == single_int_bits) {
q[low] = 0;
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
- } else if (sr < SingleInt.bit_count) {
- // 2 <= sr <= SingleInt.bit_count - 1
+ } else if (sr < single_int_bits) {
+ // 2 <= sr <= single_int_bits - 1
q[low] = 0;
- q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
- r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
- // SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1
- q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr);
- q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count));
+ // single_int_bits + 1 <= sr <= double_int_bits - 1
+ q[low] = n[low] << @intCast(Log2SingleInt, double_int_bits - sr);
+ q[high] = (n[high] << @intCast(Log2SingleInt, double_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - single_int_bits));
r[high] = 0;
- r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count);
+ r[low] = n[high] >> @intCast(Log2SingleInt, sr - single_int_bits);
}
} else {
// K X
// ---
// K K
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
- // 0 <= sr <= SingleInt.bit_count - 1 or sr large
- if (sr > SingleInt.bit_count - 1) {
+ // 0 <= sr <= single_int_bits - 1 or sr large
+ if (sr > single_int_bits - 1) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
- // 1 <= sr <= SingleInt.bit_count
- // q.all = a << (DoubleInt.bit_count - sr);
+ // 1 <= sr <= single_int_bits
+ // q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
q[low] = 0;
- if (sr == SingleInt.bit_count) {
+ if (sr == single_int_bits) {
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else {
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
- r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
- q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
}
}
}
// Not a special case
// q and r are initialized with:
- // q.all = a << (DoubleInt.bit_count - sr);
+ // q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
- // 1 <= sr <= DoubleInt.bit_count - 1
+ // 1 <= sr <= double_int_bits - 1
var carry: u32 = 0;
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
- r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
- r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
- q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
+ r[high] = (r[high] << 1) | (r[low] >> (single_int_bits - 1));
+ r[low] = (r[low] << 1) | (q[high] >> (single_int_bits - 1));
+ q[high] = (q[high] << 1) | (q[low] >> (single_int_bits - 1));
q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)
@@ -189,7 +191,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// carry = 1;
// }
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
- const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
+ const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (double_int_bits - 1);
carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
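The loop avoids branching on `r >= b`: wraparound subtraction plus an arithmetic shift of the sign bit yields an all-ones mask exactly when the remainder has reached the divisor. Worked numbers at 32 bits (my own example):

const std = @import("std");

test "branchless conditional subtract in udivmod" {
    const b: u32 = 10;
    const r: u32 = 13;
    // s is all-ones iff r >= b, so (b & s) subtracts conditionally
    // and (s & 1) doubles as the carry into the quotient.
    const s = @bitCast(i32, b -% r -% 1) >> 31;
    std.testing.expect(s == -1);
    std.testing.expect(r -% (b & @bitCast(u32, s)) == 3);
}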
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 8e443a7c77..c65cd08981 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -67,7 +67,7 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv
uefi.handle = handle;
uefi.system_table = system_table;
- switch (@TypeOf(root.main).ReturnType) {
+ switch (@typeInfo(@TypeOf(root.main)).Fn.return_type.?) {
noreturn => {
root.main();
},
@@ -239,7 +239,7 @@ fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 {
// This is not marked inline because it is called with @asyncCall when
// there is an event loop.
pub fn callMain() u8 {
- switch (@typeInfo(@TypeOf(root.main).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
.NoReturn => {
root.main();
},
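The `root.main` dispatch now reaches the return type through two `@typeInfo` hops; the same shape in isolation:

const std = @import("std");

fn demoMain() void {}

test "switch on a function's return type" {
    switch (@typeInfo(@typeInfo(@TypeOf(demoMain)).Fn.return_type.?)) {
        .Void => {},
        .ErrorUnion => unreachable,
        else => unreachable,
    }
}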
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 080ddc1c2f..528e9918ee 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -468,6 +468,7 @@ pub const Target = struct {
/// TODO Get rid of this one.
unknown,
coff,
+ pe,
elf,
macho,
wasm,
@@ -771,6 +772,63 @@ pub const Target = struct {
};
}
+ pub fn toCoffMachine(arch: Arch) std.coff.MachineType {
+ return switch (arch) {
+ .avr => .Unknown,
+ .msp430 => .Unknown,
+ .arc => .Unknown,
+ .arm => .ARM,
+ .armeb => .Unknown,
+ .hexagon => .Unknown,
+ .le32 => .Unknown,
+ .mips => .Unknown,
+ .mipsel => .Unknown,
+ .powerpc => .POWERPC,
+ .r600 => .Unknown,
+ .riscv32 => .RISCV32,
+ .sparc => .Unknown,
+ .sparcel => .Unknown,
+ .tce => .Unknown,
+ .tcele => .Unknown,
+ .thumb => .Thumb,
+ .thumbeb => .Thumb,
+ .i386 => .I386,
+ .xcore => .Unknown,
+ .nvptx => .Unknown,
+ .amdil => .Unknown,
+ .hsail => .Unknown,
+ .spir => .Unknown,
+ .kalimba => .Unknown,
+ .shave => .Unknown,
+ .lanai => .Unknown,
+ .wasm32 => .Unknown,
+ .renderscript32 => .Unknown,
+ .aarch64_32 => .ARM64,
+ .aarch64 => .ARM64,
+ .aarch64_be => .Unknown,
+ .mips64 => .Unknown,
+ .mips64el => .Unknown,
+ .powerpc64 => .Unknown,
+ .powerpc64le => .Unknown,
+ .riscv64 => .RISCV64,
+ .x86_64 => .X64,
+ .nvptx64 => .Unknown,
+ .le64 => .Unknown,
+ .amdil64 => .Unknown,
+ .hsail64 => .Unknown,
+ .spir64 => .Unknown,
+ .wasm64 => .Unknown,
+ .renderscript64 => .Unknown,
+ .amdgcn => .Unknown,
+ .bpfel => .Unknown,
+ .bpfeb => .Unknown,
+ .sparcv9 => .Unknown,
+ .s390x => .Unknown,
+ .ve => .Unknown,
+ .spu_2 => .Unknown,
+ };
+ }
+
pub fn endian(arch: Arch) builtin.Endian {
return switch (arch) {
.avr,
diff --git a/lib/std/thread.zig b/lib/std/thread.zig
index d73907690e..330c425dd6 100644
--- a/lib/std/thread.zig
+++ b/lib/std/thread.zig
@@ -166,7 +166,7 @@ pub const Thread = struct {
fn threadMain(raw_arg: windows.LPVOID) callconv(.C) windows.DWORD {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*;
- switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
@@ -227,7 +227,7 @@ pub const Thread = struct {
fn linuxThreadMain(ctx_addr: usize) callconv(.C) u8 {
const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;
- switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
@@ -259,7 +259,7 @@ pub const Thread = struct {
fn posixThreadMain(ctx: ?*c_void) callconv(.C) ?*c_void {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), ctx)).*;
- switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index e86a12884f..1dedce4067 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -22,7 +22,7 @@ pub const SrcHash = [16]u8;
/// If it is long, blake3 hash is computed.
pub fn hashSrc(src: []const u8) SrcHash {
var out: SrcHash = undefined;
- if (src.len <= SrcHash.len) {
+ if (src.len <= @typeInfo(SrcHash).Array.len) {
std.mem.copy(u8, &out, src);
std.mem.set(u8, out[src.len..], 0);
} else {
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index d273712cd1..dc48ae23e7 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -626,6 +626,7 @@ pub const Scope = struct {
module.gpa,
self.sub_file_path,
std.math.maxInt(u32),
+ null,
1,
0,
);
@@ -723,6 +724,7 @@ pub const Scope = struct {
module.gpa,
self.sub_file_path,
std.math.maxInt(u32),
+ null,
1,
0,
);
@@ -1820,6 +1822,9 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
try self.markOutdatedDecl(decl);
decl.contents_hash = contents_hash;
} else switch (self.bin_file.tag) {
+ .coff => {
+ // TODO Implement for COFF
+ },
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
@@ -2078,12 +2083,14 @@ fn allocateNewDecl(
.deletion_flag = false,
.contents_hash = contents_hash,
.link = switch (self.bin_file.tag) {
+ .coff => .{ .coff = link.File.Coff.TextBlock.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
.fn_link = switch (self.bin_file.tag) {
+ .coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index be86111b58..9405a5f72c 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -59,14 +59,21 @@ pub const GenerateSymbolError = error{
AnalysisFail,
};
+pub const DebugInfoOutput = union(enum) {
+ dwarf: struct {
+ dbg_line: *std.ArrayList(u8),
+ dbg_info: *std.ArrayList(u8),
+ dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ },
+ none,
+};
+
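+// Linkers that emit DWARF pass `.dwarf` so codegen can append line and info bytes;
+// backends without debug info support (such as the new COFF linker) pass `.none`.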
pub fn generateSymbol(
bin_file: *link.File,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
- dbg_line: *std.ArrayList(u8),
- dbg_info: *std.ArrayList(u8),
- dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -76,56 +83,56 @@ pub fn generateSymbol(
switch (bin_file.options.target.cpu.arch) {
.wasm32 => unreachable, // has its own code path
.wasm64 => unreachable, // has its own code path
- .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, debug_output),
else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
}
},
@@ -139,7 +146,7 @@ pub fn generateSymbol(
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
- }, code, dbg_line, dbg_info, dbg_info_type_relocs)) {
+ }, code, debug_output)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
@@ -239,9 +246,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
- dbg_line: *std.ArrayList(u8),
- dbg_info: *std.ArrayList(u8),
- dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@@ -419,9 +424,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
- dbg_line: *std.ArrayList(u8),
- dbg_info: *std.ArrayList(u8),
- dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@@ -457,9 +460,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.bin_file = bin_file,
.mod_fn = module_fn,
.code = code,
- .dbg_line = dbg_line,
- .dbg_info = dbg_info,
- .dbg_info_type_relocs = dbg_info_type_relocs,
+ .debug_output = debug_output,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@@ -598,35 +599,50 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn dbgSetPrologueEnd(self: *Self) InnerError!void {
- try self.dbg_line.append(DW.LNS_set_prologue_end);
- try self.dbgAdvancePCAndLine(self.prev_di_src);
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ try dbg_out.dbg_line.append(DW.LNS_set_prologue_end);
+ try self.dbgAdvancePCAndLine(self.prev_di_src);
+ },
+ .none => {},
+ }
}
fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
- try self.dbg_line.append(DW.LNS_set_epilogue_begin);
- try self.dbgAdvancePCAndLine(self.prev_di_src);
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin);
+ try self.dbgAdvancePCAndLine(self.prev_di_src);
+ },
+ .none => {},
+ }
}
fn dbgAdvancePCAndLine(self: *Self, src: usize) InnerError!void {
- // TODO Look into improving the performance here by adding a token-index-to-line
- // lookup table, and changing ir.Inst from storing byte offset to token. Currently
- // this involves scanning over the source code for newlines
- // (but only from the previous byte offset to the new one).
- const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
- const delta_pc = self.code.items.len - self.prev_di_pc;
self.prev_di_src = src;
self.prev_di_pc = self.code.items.len;
- // TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
- // single-byte opcodes that add different numbers to both the PC and the line number
- // at the same time.
- try self.dbg_line.ensureCapacity(self.dbg_line.items.len + 11);
- self.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
- leb128.writeULEB128(self.dbg_line.writer(), delta_pc) catch unreachable;
- if (delta_line != 0) {
- self.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
- leb128.writeILEB128(self.dbg_line.writer(), delta_line) catch unreachable;
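+ // The bookkeeping above runs unconditionally so the PC/line state stays
+ // consistent even when no debug info is being emitted.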
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ // TODO Look into improving the performance here by adding a token-index-to-line
+ // lookup table, and changing ir.Inst from storing byte offset to token. Currently
+ // this involves scanning over the source code for newlines
+ // (but only from the previous byte offset to the new one).
+ const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
+ const delta_pc = self.code.items.len - self.prev_di_pc;
+ // TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
+ // single-byte opcodes that add different numbers to both the PC and the line number
+ // at the same time.
+ try dbg_out.dbg_line.ensureCapacity(dbg_out.dbg_line.items.len + 11);
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
+ leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable;
+ if (delta_line != 0) {
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
+ leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
+ }
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy);
+ },
+ .none => {},
}
- self.dbg_line.appendAssumeCapacity(DW.LNS_copy);
}
/// Asserts there is already capacity to insert into top branch inst_table.
@@ -654,18 +670,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
- assert(ty.hasCodeGenBits());
- const index = self.dbg_info.items.len;
- try self.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
-
- const gop = try self.dbg_info_type_relocs.getOrPut(self.gpa, ty);
- if (!gop.found_existing) {
- gop.entry.value = .{
- .off = undefined,
- .relocs = .{},
- };
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ assert(ty.hasCodeGenBits());
+ const index = dbg_out.dbg_info.items.len;
+ try dbg_out.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
+
+ const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
+ if (!gop.found_existing) {
+ gop.entry.value = .{
+ .off = undefined,
+ .relocs = .{},
+ };
+ }
+ try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
+ },
+ .none => {},
}
- try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
}
fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue {
@@ -1258,14 +1279,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base);
self.markRegUsed(reg);
- try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len);
- self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
- self.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
- 1, // ULEB128 dwarf expression length
- reg.dwarfLocOp(),
- });
- try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
- self.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 8 + name_with_null.len);
+ dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
+ dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
+ 1, // ULEB128 dwarf expression length
+ reg.dwarfLocOp(),
+ });
+ try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
+ dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
+ },
+ .none => {},
+ }
},
else => {},
}
@@ -1302,7 +1328,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
switch (arch) {
.x86_64 => {
for (info.args) |mc_arg, arg_i| {
@@ -1341,10 +1367,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ @intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes)
+ else
+ unreachable;
+
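+ // The GOT address depends on the format: ELF reads it from the GOT program
+ // header, while COFF tracks it in `offset_table_virtual_address`.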
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
@@ -1362,10 +1395,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
+ else
+ unreachable;
try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
@@ -1383,8 +1422,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ @intCast(u16, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * 2)
+ else
+ unreachable;
+
const return_type = func.owner_decl.typed_value.most_recent.typed_value.ty.fnReturnType();
// First, push the return address, then jump; if noreturn, don't bother with the first step
// TODO: implement packed struct -> u16 at comptime and move the bitcast here
@@ -1420,10 +1465,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
+ else
+ unreachable;
// TODO only works with leaf functions
// at the moment, which works fine for
@@ -1983,7 +2033,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (mem.eql(u8, inst.asm_source, "syscall")) {
try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
- } else {
+ } else if (inst.asm_source.len != 0) {
return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
}
@@ -2541,6 +2591,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &macho_file.sections.items[macho_file.got_section_index.?];
const got_addr = got.addr + decl.link.macho.offset_table_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const decl = payload.decl;
+ const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
+ return MCValue{ .memory = got_addr };
} else {
return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{});
}
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index ecf3876582..fff69a6bbd 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -34,6 +34,7 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
+ coff: Coff.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
@@ -41,6 +42,7 @@ pub const File = struct {
pub const LinkFn = union {
elf: Elf.SrcFn,
+ coff: Coff.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
@@ -66,7 +68,7 @@ pub const File = struct {
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
switch (options.object_format) {
.unknown => unreachable,
- .coff => return error.TODOImplementCoff,
+ .coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
@@ -85,7 +87,7 @@ pub const File = struct {
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
- .elf, .macho => {
+ .coff, .elf, .macho => {
if (base.file != null) return;
base.file = try dir.createFile(sub_path, .{
.truncate = false,
@@ -112,6 +114,7 @@ pub const File = struct {
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
@@ -121,6 +124,7 @@ pub const File = struct {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c, .wasm => {},
@@ -131,6 +135,7 @@ pub const File = struct {
/// any given Decl.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c, .wasm => {},
@@ -140,6 +145,7 @@ pub const File = struct {
pub fn deinit(base: *File) void {
if (base.file) |f| f.close();
switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).deinit(),
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
@@ -149,6 +155,11 @@ pub const File = struct {
pub fn destroy(base: *File) void {
switch (base.tag) {
+ .coff => {
+ const parent = @fieldParentPtr(Coff, "base", base);
+ parent.deinit();
+ base.allocator.destroy(parent);
+ },
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
@@ -177,6 +188,7 @@ pub const File = struct {
defer tracy.end();
try switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).flush(module),
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
.c => @fieldParentPtr(C, "base", base).flush(module),
@@ -186,6 +198,7 @@ pub const File = struct {
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
@@ -195,6 +208,7 @@ pub const File = struct {
pub fn errorFlags(base: *File) ErrorFlags {
return switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).error_flags,
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
@@ -211,6 +225,7 @@ pub const File = struct {
exports: []const *Module.Export,
) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
@@ -220,6 +235,7 @@ pub const File = struct {
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.c => unreachable,
@@ -228,6 +244,7 @@ pub const File = struct {
}
pub const Tag = enum {
+ coff,
elf,
macho,
c,
@@ -239,6 +256,7 @@ pub const File = struct {
};
pub const C = @import("link/C.zig");
+ pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const MachO = @import("link/MachO.zig");
pub const Wasm = @import("link/Wasm.zig");
diff --git a/src-self-hosted/link/Coff.zig b/src-self-hosted/link/Coff.zig
new file mode 100644
index 0000000000..4d1f95e567
--- /dev/null
+++ b/src-self-hosted/link/Coff.zig
@@ -0,0 +1,792 @@
+const Coff = @This();
+
+const std = @import("std");
+const log = std.log.scoped(.link);
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const fs = std.fs;
+
+const trace = @import("../tracy.zig").trace;
+const Module = @import("../Module.zig");
+const codegen = @import("../codegen.zig");
+const link = @import("../link.zig");
+
+const allocation_padding = 4 / 3;
+const minimum_text_block_size = 64 * allocation_padding;
+
+const section_alignment = 4096;
+const file_alignment = 512;
+const image_base = 0x400_000;
+const section_table_size = 2 * 40;
+comptime {
+ std.debug.assert(std.mem.isAligned(image_base, section_alignment));
+}
+
+pub const base_tag: link.File.Tag = .coff;
+
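+// Every PE image begins with a small MS-DOS program; we embed a prebuilt 128-byte stub.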
+const msdos_stub = @embedFile("msdos-stub.bin");
+
+base: link.File,
+ptr_width: enum { p32, p64 },
+error_flags: link.File.ErrorFlags = .{},
+
+text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
+last_text_block: ?*TextBlock = null,
+
+/// Section table file pointer.
+section_table_offset: u32 = 0,
+/// Section data file pointer.
+section_data_offset: u32 = 0,
+/// Optional header file pointer.
+optional_header_offset: u32 = 0,
+
+/// Absolute virtual address of the offset table when the executable is loaded in memory.
+offset_table_virtual_address: u32 = 0,
+/// Current size of the offset table on disk; must be a multiple of `file_alignment`.
+offset_table_size: u32 = 0,
+/// Contains absolute virtual addresses
+offset_table: std.ArrayListUnmanaged(u64) = .{},
+/// Free list of offset table indices
+offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
+
+/// Virtual address of the entry point procedure relative to `image_base`
+entry_addr: ?u32 = null,
+
+/// Absolute virtual address of the text section when the executable is loaded in memory.
+text_section_virtual_address: u32 = 0,
+/// Current size of the `.text` section on disk; must be a multiple of `file_alignment`.
+text_section_size: u32 = 0,
+
+offset_table_size_dirty: bool = false,
+text_section_size_dirty: bool = false,
+/// This flag is set when the virtual size of the whole image, as loaded in memory,
+/// has changed and needs to be updated in the optional header.
+size_of_image_dirty: bool = false,
+
+pub const TextBlock = struct {
+ /// Offset of the code relative to the start of the text section
+ text_offset: u32,
+ /// Used size of the text block
+ size: u32,
+ /// This field is undefined for symbols with size = 0.
+ offset_table_index: u32,
+ /// Points to the previous and next neighbors, based on the `text_offset`.
+ /// This can be used to find, for example, the capacity of this `TextBlock`.
+ prev: ?*TextBlock,
+ next: ?*TextBlock,
+
+ pub const empty = TextBlock{
+ .text_offset = 0,
+ .size = 0,
+ .offset_table_index = undefined,
+ .prev = null,
+ .next = null,
+ };
+
+ /// Returns how much room there is to grow in virtual address space.
+ fn capacity(self: TextBlock) u64 {
+ if (self.next) |next| {
+ return next.text_offset - self.text_offset;
+ }
+ // This is the last block; the capacity is limited only by the address space.
+ return std.math.maxInt(u32) - self.text_offset;
+ }
+
+ fn freeListEligible(self: TextBlock) bool {
+ // No need to keep a free list node for the last block.
+ const next = self.next orelse return false;
+ const cap = next.text_offset - self.text_offset;
+ const ideal_cap = self.size * allocation_padding;
+ if (cap <= ideal_cap) return false;
+ const surplus = cap - ideal_cap;
+ return surplus >= minimum_text_block_size;
+ }
+
+ /// Absolute virtual address of the text block when the file is loaded in memory.
+ fn getVAddr(self: TextBlock, coff: Coff) u32 {
+ return coff.text_section_virtual_address + self.text_offset;
+ }
+};
+
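+// No per-function debug state is tracked for COFF yet, so `SrcFn` carries no data.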
+pub const SrcFn = void;
+
+pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
+ assert(options.object_format == .coff);
+
+ const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
+ errdefer file.close();
+
+ var coff_file = try allocator.create(Coff);
+ errdefer allocator.destroy(coff_file);
+
+ coff_file.* = openFile(allocator, file, options) catch |err| switch (err) {
+ error.IncrFailed => try createFile(allocator, file, options),
+ else => |e| return e,
+ };
+
+ return &coff_file.base;
+}
+
+/// Returns error.IncrFailed if incremental update could not be performed.
+fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => return error.IncrFailed,
+ .Lib => return error.IncrFailed,
+ }
+ var self: Coff = .{
+ .base = .{
+ .file = file,
+ .tag = .coff,
+ .options = options,
+ .allocator = allocator,
+ },
+ .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
+ 32 => .p32,
+ 64 => .p64,
+ else => return error.UnsupportedCOFFArchitecture,
+ },
+ };
+ errdefer self.deinit();
+
+ // TODO implement reading the PE/COFF file
+ return error.IncrFailed;
+}
+
+/// Truncates the existing file and overwrites its contents.
+/// Returns an error if `file` is not already open with +read +write +seek abilities.
+fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
+ // TODO Write object specific relocations, COFF symbol table, then enable object file output.
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => return error.TODOImplementWritingObjFiles,
+ .Lib => return error.TODOImplementWritingLibFiles,
+ }
+ var self: Coff = .{
+ .base = .{
+ .tag = .coff,
+ .options = options,
+ .allocator = allocator,
+ .file = file,
+ },
+ .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
+ 32 => .p32,
+ 64 => .p64,
+ else => return error.UnsupportedCOFFArchitecture,
+ },
+ };
+ errdefer self.deinit();
+
+ var coff_file_header_offset: u32 = 0;
+ if (options.output_mode == .Exe) {
+ // Write the MS-DOS stub and the PE signature
+ try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
+ coff_file_header_offset = msdos_stub.len + 4;
+ }
+
+ // COFF file header
+ const data_directory_count = 0;
+ var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
+ var index: usize = 0;
+
+ const machine = self.base.options.target.cpu.arch.toCoffMachine();
+ if (machine == .Unknown) {
+ return error.UnsupportedCOFFArchitecture;
+ }
+ std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
+ index += 2;
+
+ // Number of sections (we only use .got, .text)
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
+ index += 2;
+ // TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
+ std.mem.set(u8, hdr_data[index..][0..12], 0);
+ index += 12;
+
+ const optional_header_size = switch (options.output_mode) {
+ .Exe => data_directory_count * 8 + switch (self.ptr_width) {
+ .p32 => @as(u16, 96),
+ .p64 => 112,
+ },
+ else => 0,
+ };
+
+ const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
+ const default_offset_table_size = file_alignment;
+ const default_size_of_code = 0;
+
+ self.section_data_offset = std.mem.alignForwardGeneric(u32, section_table_offset + section_table_size, file_alignment);
+ const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, section_table_offset + section_table_size, section_alignment);
+ self.offset_table_virtual_address = image_base + section_data_relative_virtual_address;
+ self.offset_table_size = default_offset_table_size;
+ self.section_table_offset = section_table_offset;
+ self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment;
+ self.text_section_size = default_size_of_code;
+
+ // Size of file when loaded in memory
+ const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment);
+
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
+ index += 2;
+
+ // Characteristics
+ var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
+ if (options.output_mode == .Exe) {
+ characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
+ }
+ switch (self.ptr_width) {
+ .p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
+ .p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
+ }
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
+ index += 2;
+
+ assert(index == 20);
+ try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
+
+ if (options.output_mode == .Exe) {
+ self.optional_header_offset = coff_file_header_offset + 20;
+ // Optional header
+ index = 0;
+ std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
+ .p32 => @as(u16, 0x10b),
+ .p64 => 0x20b,
+ });
+ index += 2;
+
+ // Linker version (u8 + u8)
+ std.mem.set(u8, hdr_data[index..][0..2], 0);
+ index += 2;
+
+ // SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
+ std.mem.set(u8, hdr_data[index..][0..20], 0);
+ index += 20;
+
+ if (self.ptr_width == .p32) {
+ // Base of data relative to the image base (UNUSED)
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+
+ // Image base address
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base);
+ index += 4;
+ } else {
+ // Image base address
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base);
+ index += 8;
+ }
+
+ // Section alignment
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
+ index += 4;
+ // File alignment
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
+ index += 4;
+ // Required OS version; 6.0 is Vista
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
+ index += 2;
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
+ index += 2;
+ // Image version
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+ // Required subsystem version, same as OS version
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
+ index += 2;
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
+ index += 2;
+ // Reserved zeroes (u32)
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
+ index += 4;
+ // CheckSum (u32)
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+ // Subsystem (always CUI for now). TODO: let users specify the subsystem.
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
+ index += 2;
+ // DLL characteristics
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
+ index += 2;
+
+ switch (self.ptr_width) {
+ .p32 => {
+ // Size of stack reserve + commit
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
+ index += 4;
+ // Size of heap reserve + commit
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
+ index += 4;
+ },
+ .p64 => {
+ // Size of stack reserve + commit
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
+ index += 8;
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
+ index += 8;
+ // Size of heap reserve + commit
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
+ index += 8;
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
+ index += 8;
+ },
+ }
+
+ // Reserved zeroes
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+
+ // Number of data directories
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
+ index += 4;
+ // Initialize data directories to zero
+ std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
+ index += data_directory_count * 8;
+
+ assert(index == optional_header_size);
+ }
+
+ // Write section table.
+ // First, the .got section
+ hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
+ index += 8;
+ if (options.output_mode == .Exe) {
+ // Virtual size (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
+ index += 4;
+ // Virtual address (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base);
+ index += 4;
+ } else {
+ std.mem.set(u8, hdr_data[index..][0..8], 0);
+ index += 8;
+ }
+ // Size of raw data (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
+ index += 4;
+ // File pointer to the start of the section
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
+ index += 4;
+ // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
+ std.mem.set(u8, hdr_data[index..][0..12], 0);
+ index += 12;
+ // Section flags
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
+ index += 4;
+ // Then, the .text section
+ hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
+ index += 8;
+ if (options.output_mode == .Exe) {
+ // Virtual size (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
+ index += 4;
+ // Virtual address (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base);
+ index += 4;
+ } else {
+ std.mem.set(u8, hdr_data[index..][0..8], 0);
+ index += 8;
+ }
+ // Size of raw data (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
+ index += 4;
+ // File pointer to the start of the section
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
+ index += 4;
+ // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
+ std.mem.set(u8, hdr_data[index..][0..12], 0);
+ index += 12;
+ // Section flags
+ std.mem.writeIntLittle(
+ u32,
+ hdr_data[index..][0..4],
+ std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
+ );
+ index += 4;
+
+ assert(index == optional_header_size + section_table_size);
+ try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
+ try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
+
+ return self;
+}
+
+pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
+ try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+
+ if (self.offset_table_free_list.popOrNull()) |i| {
+ decl.link.coff.offset_table_index = i;
+ } else {
+ decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len);
+ _ = self.offset_table.addOneAssumeCapacity();
+
+ const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
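+ // Growing the on-disk table is deferred: the dirty flag makes the next
+ // `writeOffsetTableEntry` call enlarge the `.got` section.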
+ if (self.offset_table.items.len > self.offset_table_size / entry_size) {
+ self.offset_table_size_dirty = true;
+ }
+ }
+
+ self.offset_table.items[decl.link.coff.offset_table_index] = 0;
+}
+
+fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const new_block_min_capacity = new_block_size * allocation_padding;
+
+ // We use these to indicate our intention to update metadata, placing the new block,
+ // and possibly removing a free list node.
+ // It would be simpler to do it inside the loop below, but that would cause a
+ // problem if an error was returned later in the function. So this action
+ // is actually carried out at the end of the function, when errors are no longer possible.
+ var block_placement: ?*TextBlock = null;
+ var free_list_removal: ?usize = null;
+
+ const vaddr = blk: {
+ var i: usize = 0;
+ while (i < self.text_block_free_list.items.len) {
+ const free_block = self.text_block_free_list.items[i];
+
+ const next_block_text_offset = free_block.text_offset + free_block.capacity();
+ const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
+ if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
+ block_placement = free_block;
+
+ const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
+ if (remaining_capacity < minimum_text_block_size) {
+ free_list_removal = i;
+ }
+
+ break :blk new_block_text_offset + self.text_section_virtual_address;
+ } else {
+ if (!free_block.freeListEligible()) {
+ _ = self.text_block_free_list.swapRemove(i);
+ } else {
+ i += 1;
+ }
+ continue;
+ }
+ } else if (self.last_text_block) |last| {
+ const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
+ block_placement = last;
+ break :blk new_block_vaddr;
+ } else {
+ break :blk self.text_section_virtual_address;
+ }
+ };
+
+ const expand_text_section = block_placement == null or block_placement.?.next == null;
+ if (expand_text_section) {
+ const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
+ if (needed_size > self.text_section_size) {
+ const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
+ const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment);
+ if (current_text_section_virtual_size != new_text_section_virtual_size) {
+ self.size_of_image_dirty = true;
+ // Write new virtual size
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
+ try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
+ }
+
+ self.text_section_size = needed_size;
+ self.text_section_size_dirty = true;
+ }
+ self.last_text_block = text_block;
+ }
+ text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
+ text_block.size = @intCast(u32, new_block_size);
+
+ // This function can also reallocate a text block.
+ // In this case we need to "unplug" it from its previous location before
+ // plugging it in to its new location.
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
+ }
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ }
+
+ if (block_placement) |big_block| {
+ text_block.prev = big_block;
+ text_block.next = big_block.next;
+ big_block.next = text_block;
+ } else {
+ text_block.prev = null;
+ text_block.next = null;
+ }
+ if (free_list_removal) |i| {
+ _ = self.text_block_free_list.swapRemove(i);
+ }
+ return vaddr;
+}
+
+fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const block_vaddr = text_block.getVAddr(self.*);
+ const align_ok = std.mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr;
+ const need_realloc = !align_ok or new_block_size > text_block.capacity();
+ if (!need_realloc) return @as(u64, block_vaddr);
+ return self.allocateTextBlock(text_block, new_block_size, alignment);
+}
+
+fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
+ text_block.size = @intCast(u32, new_block_size);
+ if (text_block.capacity() - text_block.size >= minimum_text_block_size) {
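+ // The free list is advisory, so an allocation failure here is safe to ignore.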
+ self.text_block_free_list.append(self.base.allocator, text_block) catch {};
+ }
+}
+
+fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
+ var already_have_free_list_node = false;
+ {
+ var i: usize = 0;
+ // TODO turn text_block_free_list into a hash map
+ while (i < self.text_block_free_list.items.len) {
+ if (self.text_block_free_list.items[i] == text_block) {
+ _ = self.text_block_free_list.swapRemove(i);
+ continue;
+ }
+ if (self.text_block_free_list.items[i] == text_block.prev) {
+ already_have_free_list_node = true;
+ }
+ i += 1;
+ }
+ }
+ if (self.last_text_block == text_block) {
+ self.last_text_block = text_block.prev;
+ }
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
+
+ if (!already_have_free_list_node and prev.freeListEligible()) {
+ // The free list is a heuristic; it doesn't have to be perfect, so we can
+ // ignore the OOM here.
+ self.text_block_free_list.append(self.base.allocator, prev) catch {};
+ }
+ }
+
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ }
+}
+
+fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
+ const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
+ const endian = self.base.options.target.cpu.arch.endian();
+
+ const offset_table_start = self.section_data_offset;
+ if (self.offset_table_size_dirty) {
+ const current_raw_size = self.offset_table_size;
+ const new_raw_size = self.offset_table_size * 2;
+ log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });
+
+ // Move the text section to a new place in the executable
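+ // (The raw size is doubled so that these whole-section moves stay rare.)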
+ const current_text_section_start = self.section_data_offset + current_raw_size;
+ const new_text_section_start = self.section_data_offset + new_raw_size;
+
+ const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
+ if (amt != self.text_section_size) return error.InputOutput;
+
+ // Write the new raw size in the .got header
+ var buf: [8]u8 = undefined;
+ std.mem.writeIntLittle(u32, buf[0..4], new_raw_size);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
+ // Write the new .text section file offset in the .text section header
+ std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);
+
+ const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
+ const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
+ // If we had to move in the virtual address space, we need to fix the VAs in the offset table,
+ // as well as the virtual address of the `.text` section and the virtual size of the `.got` section.
+
+ if (new_virtual_size != current_virtual_size) {
+ log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
+ self.size_of_image_dirty = true;
+ const va_offset = new_virtual_size - current_virtual_size;
+
+ // Write .got virtual size
+ std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);
+
+ // Write .text new virtual address
+ self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
+ std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);
+
+ // Fix the VAs in the offset table
+ for (self.offset_table.items) |*va, idx| {
+ if (va.* != 0) {
+ va.* += va_offset;
+
+ switch (entry_size) {
+ 4 => {
+ std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
+ try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
+ },
+ 8 => {
+ std.mem.writeInt(u64, &buf, va.*, endian);
+ try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
+ },
+ else => unreachable,
+ }
+ }
+ }
+ }
+ self.offset_table_size = new_raw_size;
+ self.offset_table_size_dirty = false;
+ }
+ // Write the new entry
+ switch (entry_size) {
+ 4 => {
+ var buf: [4]u8 = undefined;
+ std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
+ try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
+ },
+ 8 => {
+ var buf: [8]u8 = undefined;
+ std.mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
+ try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
+ },
+ else => unreachable,
+ }
+}
+
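+/// Generates machine code for `decl`, (re)allocates its text block as
+/// needed, writes the code to the output file, and updates the
+/// corresponding offset table entry and exports.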
+pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+ // TODO COFF/PE debug information
+ // TODO Implement exports
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+
+ const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const curr_size = decl.link.coff.size;
+ if (curr_size != 0) {
+ const capacity = decl.link.coff.capacity();
+ const need_realloc = code.len > capacity or
+ !std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
+ if (need_realloc) {
+ const curr_vaddr = self.getDeclVAddr(decl);
+ const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
+ log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
+ if (vaddr != curr_vaddr) {
+ log.debug(" (writing new offset table entry)\n", .{});
+ self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
+ }
+ } else if (code.len < curr_size) {
+ self.shrinkTextBlock(&decl.link.coff, code.len);
+ }
+ } else {
+ const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
+ log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len });
+ errdefer self.freeTextBlock(&decl.link.coff);
+ self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
+ }
+
+ // Write the code into the file
+ try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
+
+ // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
+ return self.updateDeclExports(module, decl, decl_exports);
+}
+
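+/// Returns the decl's text block and offset table slot to the free lists.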
+pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
+ // Appending to free lists is allowed to fail because the free lists are heuristic-based anyway.
+ self.freeTextBlock(&decl.link.coff);
+ self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
+}
+
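+/// Only a `_start` export into the default `.text` section is supported;
+/// it determines the image entry point. Anything else is reported as
+/// unimplemented.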
+pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
+ for (exports) |exp| {
+ if (exp.options.section) |section_name| {
+ if (!std.mem.eql(u8, section_name, ".text")) {
+ try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
+ );
+ continue;
+ }
+ }
+ if (std.mem.eql(u8, exp.options.name, "_start")) {
+ self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base;
+ } else {
+ try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
+ );
+ continue;
+ }
+ }
+}
+
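+/// Commits pending header updates to the output file: the `.text` section
+/// raw size, the image size, and the entry point address.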
+pub fn flush(self: *Coff, module: *Module) !void {
+ if (self.text_section_size_dirty) {
+ // Write the new raw size in the .text section header
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, self.text_section_size);
+ try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
+ try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
+ self.text_section_size_dirty = false;
+ }
+
+ if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
+ const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment);
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, new_size_of_image);
+ try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
+ self.size_of_image_dirty = false;
+ }
+
+ if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
+ log.debug("flushing. no_entry_point_found = true\n", .{});
+ self.error_flags.no_entry_point_found = true;
+ } else {
+ log.debug("flushing. no_entry_point_found = false\n", .{});
+ self.error_flags.no_entry_point_found = false;
+
+ if (self.base.options.output_mode == .Exe) {
+ // Write AddressOfEntryPoint
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, self.entry_addr.?);
+ try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
+ }
+ }
+}
+
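+/// Returns the virtual address of the decl's code within the `.text` section.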
+pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
+ return self.text_section_virtual_address + decl.link.coff.text_offset;
+}
+
+pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+ // TODO Implement this
+}
+
+pub fn deinit(self: *Coff) void {
+ self.text_block_free_list.deinit(self.base.allocator);
+ self.offset_table.deinit(self.base.allocator);
+ self.offset_table_free_list.deinit(self.base.allocator);
+}
diff --git a/src-self-hosted/link/Elf.zig b/src-self-hosted/link/Elf.zig
index 451160630a..e5acde947c 100644
--- a/src-self-hosted/link/Elf.zig
+++ b/src-self-hosted/link/Elf.zig
@@ -1735,7 +1735,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
} else {
// TODO implement .debug_info for global variables
}
- const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer, &dbg_info_type_relocs);
+ const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{
+ .dwarf = .{
+ .dbg_line = &dbg_line_buffer,
+ .dbg_info = &dbg_info_buffer,
+ .dbg_info_type_relocs = &dbg_info_type_relocs,
+ },
+ });
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
diff --git a/src-self-hosted/link/MachO.zig b/src-self-hosted/link/MachO.zig
index 93d7b2381a..27d0488f25 100644
--- a/src-self-hosted/link/MachO.zig
+++ b/src-self-hosted/link/MachO.zig
@@ -316,31 +316,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer dbg_line_buffer.deinit();
-
- var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer dbg_info_buffer.deinit();
-
- var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
- defer {
- var it = dbg_info_type_relocs.iterator();
- while (it.next()) |entry| {
- entry.value.relocs.deinit(self.base.allocator);
- }
- dbg_info_type_relocs.deinit(self.base.allocator);
- }
-
const typed_value = decl.typed_value.most_recent.typed_value;
- const res = try codegen.generateSymbol(
- &self.base,
- decl.src(),
- typed_value,
- &code_buffer,
- &dbg_line_buffer,
- &dbg_info_buffer,
- &dbg_info_type_relocs,
- );
+ const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
const code = switch (res) {
.externally_managed => |x| x,
diff --git a/src-self-hosted/link/msdos-stub.bin b/src-self-hosted/link/msdos-stub.bin
new file mode 100644
index 0000000000..96ad91198f
--- /dev/null
+++ b/src-self-hosted/link/msdos-stub.bin
Binary files differ
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 8c5c034238..fb20a09f1d 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -153,8 +153,8 @@ const usage_build_generic =
\\ elf Executable and Linking Format
\\ c Compile to C source code
\\ wasm WebAssembly
+ \\ pe Portable Executable (Windows)
\\ coff (planned) Common Object File Format (Windows)
- \\ pe (planned) Portable Executable (Windows)
\\ macho (planned) macOS relocatables
\\ hex (planned) Intel IHEX
\\ raw (planned) Dump machine code directly
@@ -451,7 +451,7 @@ fn buildOutputType(
} else if (mem.eql(u8, ofmt, "coff")) {
break :blk .coff;
} else if (mem.eql(u8, ofmt, "pe")) {
- break :blk .coff;
+ break :blk .pe;
} else if (mem.eql(u8, ofmt, "macho")) {
break :blk .macho;
} else if (mem.eql(u8, ofmt, "wasm")) {
@@ -524,17 +524,19 @@ fn buildOutputType(
try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)});
continue;
}) |line| {
- if (mem.eql(u8, line, "update")) {
+ const actual_line = mem.trimRight(u8, line, "\r\n ");
+
+ if (mem.eql(u8, actual_line, "update")) {
if (output_mode == .Exe) {
try module.makeBinFileWritable();
}
try updateModule(gpa, &module, zir_out_path);
- } else if (mem.eql(u8, line, "exit")) {
+ } else if (mem.eql(u8, actual_line, "exit")) {
break;
- } else if (mem.eql(u8, line, "help")) {
+ } else if (mem.eql(u8, actual_line, "help")) {
try stderr.writeAll(repl_help);
} else {
- try stderr.print("unknown command: {}\n", .{line});
+ try stderr.print("unknown command: {}\n", .{actual_line});
}
} else {
break;
@@ -742,6 +744,7 @@ const FmtError = error{
LinkQuotaExceeded,
FileBusy,
EndOfStream,
+ Unseekable,
NotOpenForWriting,
} || fs.File.OpenError;
@@ -805,7 +808,13 @@ fn fmtPathFile(
if (stat.kind == .Directory)
return error.IsDir;
- const source_code = source_file.readAllAlloc(fmt.gpa, stat.size, max_src_size) catch |err| switch (err) {
+ const source_code = source_file.readToEndAllocOptions(
+ fmt.gpa,
+ max_src_size,
+ stat.size,
+ @alignOf(u8),
+ null,
+ ) catch |err| switch (err) {
error.ConnectionResetByPeer => unreachable,
error.ConnectionTimedOut => unreachable,
error.NotOpenForReading => unreachable,
diff --git a/src-self-hosted/stage2.zig b/src-self-hosted/stage2.zig
index 30d2ea44db..45b8ad3073 100644
--- a/src-self-hosted/stage2.zig
+++ b/src-self-hosted/stage2.zig
@@ -615,7 +615,6 @@ export fn stage2_libc_parse(stage1_libc: *Stage2LibCInstallation, libc_file_z: [
error.NotOpenForWriting => unreachable,
error.NotOpenForReading => unreachable,
error.Unexpected => return .Unexpected,
- error.EndOfStream => return .EndOfFile,
error.IsDir => return .IsDir,
error.ConnectionResetByPeer => unreachable,
error.ConnectionTimedOut => unreachable,
diff --git a/src/analyze.cpp b/src/analyze.cpp
index acdbf3e933..b1d362f6e9 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -1810,7 +1810,7 @@ Error type_allowed_in_extern(CodeGen *g, ZigType *type_entry, bool *result) {
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry) {
ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
buf_resize(&err_set_type->name, 0);
- buf_appendf(&err_set_type->name, "@TypeOf(%s).ReturnType.ErrorSet", buf_ptr(&fn_entry->symbol_name));
+ buf_appendf(&err_set_type->name, "@typeInfo(@typeInfo(@TypeOf(%s)).Fn.return_type.?).ErrorUnion.error_set", buf_ptr(&fn_entry->symbol_name));
err_set_type->data.error_set.err_count = 0;
err_set_type->data.error_set.errors = nullptr;
err_set_type->data.error_set.infer_fn = fn_entry;
diff --git a/src/ir.cpp b/src/ir.cpp
index 63b979ffbc..5fe9dfa0ce 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -22836,167 +22836,9 @@ static IrInstGen *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstSrcFiel
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, const_val,
err_set_type, ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (child_type->id == ZigTypeIdInt) {
- if (buf_eql_str(field_name, "bit_count")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
- child_type->data.integral.bit_count, false),
- ira->codegen->builtin_types.entry_num_lit_int,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (buf_eql_str(field_name, "is_signed")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_bool(ira->codegen, child_type->data.integral.is_signed),
- ira->codegen->builtin_types.entry_bool,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
- } else if (child_type->id == ZigTypeIdFloat) {
- if (buf_eql_str(field_name, "bit_count")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
- child_type->data.floating.bit_count, false),
- ira->codegen->builtin_types.entry_num_lit_int,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
- } else if (child_type->id == ZigTypeIdPointer) {
- if (buf_eql_str(field_name, "Child")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_type(ira->codegen, child_type->data.pointer.child_type),
- ira->codegen->builtin_types.entry_type,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (buf_eql_str(field_name, "alignment")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- if ((err = type_resolve(ira->codegen, child_type->data.pointer.child_type,
- ResolveStatusAlignmentKnown)))
- {
- return ira->codegen->invalid_inst_gen;
- }
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
- get_ptr_align(ira->codegen, child_type), false),
- ira->codegen->builtin_types.entry_num_lit_int,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
- } else if (child_type->id == ZigTypeIdArray) {
- if (buf_eql_str(field_name, "Child")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_type(ira->codegen, child_type->data.array.child_type),
- ira->codegen->builtin_types.entry_type,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (buf_eql_str(field_name, "len")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
- child_type->data.array.len, false),
- ira->codegen->builtin_types.entry_num_lit_int,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
- } else if (child_type->id == ZigTypeIdErrorUnion) {
- if (buf_eql_str(field_name, "Payload")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_type(ira->codegen, child_type->data.error_union.payload_type),
- ira->codegen->builtin_types.entry_type,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (buf_eql_str(field_name, "ErrorSet")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_type(ira->codegen, child_type->data.error_union.err_set_type),
- ira->codegen->builtin_types.entry_type,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
- } else if (child_type->id == ZigTypeIdOptional) {
- if (buf_eql_str(field_name, "Child")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_type(ira->codegen, child_type->data.maybe.child_type),
- ira->codegen->builtin_types.entry_type,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
- } else if (child_type->id == ZigTypeIdFn) {
- if (buf_eql_str(field_name, "ReturnType")) {
- if (child_type->data.fn.fn_type_id.return_type == nullptr) {
- // Return type can only ever be null, if the function is generic
- assert(child_type->data.fn.is_generic);
-
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("ReturnType has not been resolved because '%s' is generic", buf_ptr(&child_type->name)));
- return ira->codegen->invalid_inst_gen;
- }
-
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_type(ira->codegen, child_type->data.fn.fn_type_id.return_type),
- ira->codegen->builtin_types.entry_type,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (buf_eql_str(field_name, "is_var_args")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_bool(ira->codegen, child_type->data.fn.fn_type_id.is_var_args),
- ira->codegen->builtin_types.entry_bool,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else if (buf_eql_str(field_name, "arg_count")) {
- bool ptr_is_const = true;
- bool ptr_is_volatile = false;
- return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
- create_const_usize(ira->codegen, child_type->data.fn.fn_type_id.param_count),
- ira->codegen->builtin_types.entry_usize,
- ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
- } else {
- ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' has no member called '%s'",
- buf_ptr(&child_type->name), buf_ptr(field_name)));
- return ira->codegen->invalid_inst_gen;
- }
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
- buf_sprintf("type '%s' does not support field access", buf_ptr(&child_type->name)));
+ buf_sprintf("type '%s' does not support field access", buf_ptr(&container_type->name)));
return ira->codegen->invalid_inst_gen;
}
} else if (field_ptr_instruction->initializing) {
@@ -26753,7 +26595,7 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
if (operand_type->id == ZigTypeIdFloat) {
ir_add_error(ira, &instruction->type_value->child->base,
- buf_sprintf("expected integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name)));
+ buf_sprintf("expected bool, integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name)));
return ira->codegen->invalid_inst_gen;
}
@@ -30408,7 +30250,7 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
return ira->codegen->builtin_types.entry_invalid;
if (operand_ptr_type == nullptr) {
ir_add_error(ira, &op->base,
- buf_sprintf("expected integer, float, enum or pointer type, found '%s'",
+ buf_sprintf("expected bool, integer, float, enum or pointer type, found '%s'",
buf_ptr(&operand_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index f6e00e1dbb..31f2b57dc8 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -176,11 +176,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
, &[_][]const u8{
"tmp.zig:2:17: error: expected type 'u32', found 'error{Ohno}'",
"tmp.zig:1:17: note: function cannot return an error",
- "tmp.zig:8:5: error: expected type 'void', found '@TypeOf(bar).ReturnType.ErrorSet'",
+ "tmp.zig:8:5: error: expected type 'void', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set'",
"tmp.zig:7:17: note: function cannot return an error",
- "tmp.zig:11:15: error: expected type 'u32', found '@TypeOf(bar).ReturnType.ErrorSet!u32'",
+ "tmp.zig:11:15: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
"tmp.zig:10:17: note: function cannot return an error",
- "tmp.zig:15:14: error: expected type 'u32', found '@TypeOf(bar).ReturnType.ErrorSet!u32'",
+ "tmp.zig:15:14: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
"tmp.zig:14:5: note: cannot store an error in type 'u32'",
});
@@ -899,7 +899,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ _ = @cmpxchgWeak(f32, &x, 1, 2, .SeqCst, .SeqCst);
\\}
, &[_][]const u8{
- "tmp.zig:3:22: error: expected integer, enum or pointer type, found 'f32'",
+ "tmp.zig:3:22: error: expected bool, integer, enum or pointer type, found 'f32'",
});
cases.add("atomicrmw with float op not .Xchg, .Add or .Sub",
@@ -1224,7 +1224,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ };
\\}
, &[_][]const u8{
- "tmp.zig:11:25: error: expected type 'u32', found '@TypeOf(get_uval).ReturnType.ErrorSet!u32'",
+ "tmp.zig:11:25: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(get_uval)).Fn.return_type.?).ErrorUnion.error_set!u32'",
});
cases.add("assigning to struct or union fields that are not optionals with a function that returns an optional",
@@ -1929,7 +1929,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ const info = @TypeOf(slice).unknown;
\\}
, &[_][]const u8{
- "tmp.zig:3:32: error: type '[]i32' does not support field access",
+ "tmp.zig:3:32: error: type 'type' does not support field access",
});
cases.add("peer cast then implicit cast const pointer to mutable C pointer",
@@ -3542,7 +3542,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ }
\\}
, &[_][]const u8{
- "tmp.zig:5:14: error: duplicate switch value: '@TypeOf(foo).ReturnType.ErrorSet.Foo'",
+ "tmp.zig:5:14: error: duplicate switch value: '@typeInfo(@typeInfo(@TypeOf(foo)).Fn.return_type.?).ErrorUnion.error_set.Foo'",
"tmp.zig:3:14: note: other value is here",
});
@@ -3674,7 +3674,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ try foo();
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: cannot resolve inferred error set '@TypeOf(foo).ReturnType.ErrorSet': function 'foo' not fully analyzed yet",
+ "tmp.zig:5:5: error: cannot resolve inferred error set '@typeInfo(@typeInfo(@TypeOf(foo)).Fn.return_type.?).ErrorUnion.error_set': function 'foo' not fully analyzed yet",
});
cases.add("implicit cast of error set not a subset",
@@ -7206,15 +7206,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:7:24: error: accessing union field 'Bar' while field 'Baz' is set",
});
- cases.add("getting return type of generic function",
- \\fn generic(a: anytype) void {}
- \\comptime {
- \\ _ = @TypeOf(generic).ReturnType;
- \\}
- , &[_][]const u8{
- "tmp.zig:3:25: error: ReturnType has not been resolved because 'fn(anytype) anytype' is generic",
- });
-
cases.add("unsupported modifier at start of asm output constraint",
\\export fn foo() void {
\\ var bar: u32 = 3;
diff --git a/test/stage1/behavior/align.zig b/test/stage1/behavior/align.zig
index 62f439d6df..0a0cc3bcc0 100644
--- a/test/stage1/behavior/align.zig
+++ b/test/stage1/behavior/align.zig
@@ -5,7 +5,7 @@ const builtin = @import("builtin");
var foo: u8 align(4) = 100;
test "global variable alignment" {
- comptime expect(@TypeOf(&foo).alignment == 4);
+ comptime expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
comptime expect(@TypeOf(&foo) == *align(4) u8);
{
const slice = @as(*[1]u8, &foo)[0..];
diff --git a/test/stage1/behavior/array.zig b/test/stage1/behavior/array.zig
index d5ca44f0a2..9e1d2cbac4 100644
--- a/test/stage1/behavior/array.zig
+++ b/test/stage1/behavior/array.zig
@@ -136,16 +136,6 @@ test "array literal with specified size" {
expect(array[1] == 2);
}
-test "array child property" {
- var x: [5]i32 = undefined;
- expect(@TypeOf(x).Child == i32);
-}
-
-test "array len property" {
- var x: [5]i32 = undefined;
- expect(@TypeOf(x).len == 5);
-}
-
test "array len field" {
var arr = [4]u8{ 0, 0, 0, 0 };
var ptr = &arr;
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index 807e4c6275..e2cececf69 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -331,7 +331,7 @@ test "async fn with inferred error set" {
fn doTheTest() void {
var frame: [1]@Frame(middle) = undefined;
var fn_ptr = middle;
- var result: @TypeOf(fn_ptr).ReturnType.ErrorSet!void = undefined;
+ var result: @typeInfo(@typeInfo(@TypeOf(fn_ptr)).Fn.return_type.?).ErrorUnion.error_set!void = undefined;
_ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, fn_ptr, .{});
resume global_frame;
std.testing.expectError(error.Fail, result);
@@ -950,7 +950,7 @@ test "@asyncCall with comptime-known function, but not awaited directly" {
fn doTheTest() void {
var frame: [1]@Frame(middle) = undefined;
- var result: @TypeOf(middle).ReturnType.ErrorSet!void = undefined;
+ var result: @typeInfo(@typeInfo(@TypeOf(middle)).Fn.return_type.?).ErrorUnion.error_set!void = undefined;
_ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, middle, .{});
resume global_frame;
std.testing.expectError(error.Fail, result);
@@ -1018,7 +1018,7 @@ test "@TypeOf an async function call of generic fn with error union type" {
const S = struct {
fn func(comptime x: anytype) anyerror!i32 {
const T = @TypeOf(async func(x));
- comptime expect(T == @TypeOf(@frame()).Child);
+ comptime expect(T == @typeInfo(@TypeOf(@frame())).Pointer.child);
return undefined;
}
};
diff --git a/test/stage1/behavior/bit_shifting.zig b/test/stage1/behavior/bit_shifting.zig
index 7306acba4a..786cef0802 100644
--- a/test/stage1/behavior/bit_shifting.zig
+++ b/test/stage1/behavior/bit_shifting.zig
@@ -2,16 +2,18 @@ const std = @import("std");
const expect = std.testing.expect;
fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, comptime V: type) type {
- expect(Key == std.meta.Int(false, Key.bit_count));
- expect(Key.bit_count >= mask_bit_count);
+ const key_bits = @typeInfo(Key).Int.bits;
+ expect(Key == std.meta.Int(false, key_bits));
+ expect(key_bits >= mask_bit_count);
+ const shard_key_bits = mask_bit_count;
const ShardKey = std.meta.Int(false, mask_bit_count);
- const shift_amount = Key.bit_count - ShardKey.bit_count;
+ const shift_amount = key_bits - shard_key_bits;
return struct {
const Self = @This();
- shards: [1 << ShardKey.bit_count]?*Node,
+ shards: [1 << shard_key_bits]?*Node,
pub fn create() Self {
- return Self{ .shards = [_]?*Node{null} ** (1 << ShardKey.bit_count) };
+ return Self{ .shards = [_]?*Node{null} ** (1 << shard_key_bits) };
}
fn getShardKey(key: Key) ShardKey {
diff --git a/test/stage1/behavior/bugs/5487.zig b/test/stage1/behavior/bugs/5487.zig
index 05967b6de4..02fa677a44 100644
--- a/test/stage1/behavior/bugs/5487.zig
+++ b/test/stage1/behavior/bugs/5487.zig
@@ -3,8 +3,8 @@ const io = @import("std").io;
pub fn write(_: void, bytes: []const u8) !usize {
return 0;
}
-pub fn outStream() io.OutStream(void, @TypeOf(write).ReturnType.ErrorSet, write) {
- return io.OutStream(void, @TypeOf(write).ReturnType.ErrorSet, write){ .context = {} };
+pub fn outStream() io.OutStream(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write) {
+ return io.OutStream(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write){ .context = {} };
}
test "crash" {
diff --git a/test/stage1/behavior/error.zig b/test/stage1/behavior/error.zig
index 975e08b04f..9635f2870c 100644
--- a/test/stage1/behavior/error.zig
+++ b/test/stage1/behavior/error.zig
@@ -84,8 +84,8 @@ fn testErrorUnionType() void {
const x: anyerror!i32 = 1234;
if (x) |value| expect(value == 1234) else |_| unreachable;
expect(@typeInfo(@TypeOf(x)) == .ErrorUnion);
- expect(@typeInfo(@TypeOf(x).ErrorSet) == .ErrorSet);
- expect(@TypeOf(x).ErrorSet == anyerror);
+ expect(@typeInfo(@typeInfo(@TypeOf(x)).ErrorUnion.error_set) == .ErrorSet);
+ expect(@typeInfo(@TypeOf(x)).ErrorUnion.error_set == anyerror);
}
test "error set type" {
diff --git a/test/stage1/behavior/misc.zig b/test/stage1/behavior/misc.zig
index 57a9ba2576..a71d6f86f3 100644
--- a/test/stage1/behavior/misc.zig
+++ b/test/stage1/behavior/misc.zig
@@ -24,12 +24,6 @@ test "call disabled extern fn" {
disabledExternFn();
}
-test "floating point primitive bit counts" {
- expect(f16.bit_count == 16);
- expect(f32.bit_count == 32);
- expect(f64.bit_count == 64);
-}
-
test "short circuit" {
testShortCircuit(false, true);
comptime testShortCircuit(false, true);
@@ -577,10 +571,6 @@ test "slice string literal has correct type" {
comptime expect(@TypeOf(array[runtime_zero..]) == []const i32);
}
-test "pointer child field" {
- expect((*u32).Child == u32);
-}
-
test "struct inside function" {
testStructInFn();
comptime testStructInFn();
diff --git a/test/stage1/behavior/reflection.zig b/test/stage1/behavior/reflection.zig
index ab0a55092c..6d1c341713 100644
--- a/test/stage1/behavior/reflection.zig
+++ b/test/stage1/behavior/reflection.zig
@@ -2,23 +2,15 @@ const expect = @import("std").testing.expect;
const mem = @import("std").mem;
const reflection = @This();
-test "reflection: array, pointer, optional, error union type child" {
- comptime {
- expect(([10]u8).Child == u8);
- expect((*u8).Child == u8);
- expect((anyerror!u8).Payload == u8);
- expect((?u8).Child == u8);
- }
-}
-
test "reflection: function return type, var args, and param types" {
comptime {
- expect(@TypeOf(dummy).ReturnType == i32);
- expect(!@TypeOf(dummy).is_var_args);
- expect(@TypeOf(dummy).arg_count == 3);
- expect(@typeInfo(@TypeOf(dummy)).Fn.args[0].arg_type.? == bool);
- expect(@typeInfo(@TypeOf(dummy)).Fn.args[1].arg_type.? == i32);
- expect(@typeInfo(@TypeOf(dummy)).Fn.args[2].arg_type.? == f32);
+ const info = @typeInfo(@TypeOf(dummy)).Fn;
+ expect(info.return_type.? == i32);
+ expect(!info.is_var_args);
+ expect(info.args.len == 3);
+ expect(info.args[0].arg_type.? == bool);
+ expect(info.args[1].arg_type.? == i32);
+ expect(info.args[2].arg_type.? == f32);
}
}