From d8fada6b6325e07015fddd68bb4c6369a66f23f3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 20 Feb 2023 21:31:57 -0500 Subject: CBE: add CType interning --- src/codegen/c.zig | 13 +- src/codegen/c/type.zig | 1457 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1469 insertions(+), 1 deletion(-) create mode 100644 src/codegen/c/type.zig (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 0beb00b236..872d5fd344 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -27,6 +27,8 @@ const Mutability = enum { Const, ConstArgument, Mut }; const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; +pub const CType = @import("c/type.zig").CType; + pub const CValue = union(enum) { none: void, local: LocalIndex, @@ -446,7 +448,8 @@ pub const Function = struct { return f.object.dg.fmtIntLiteral(ty, val); } - pub fn deinit(f: *Function, gpa: mem.Allocator) void { + pub fn deinit(f: *Function) void { + const gpa = f.object.dg.gpa; f.allocs.deinit(gpa); f.locals.deinit(gpa); for (f.free_locals_stack.items) |*free_locals| { @@ -460,6 +463,7 @@ pub const Function = struct { gpa.free(typedef.rendered); } f.object.dg.typedefs.deinit(); + f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); f.arena.deinit(); } @@ -487,6 +491,7 @@ pub const DeclGen = struct { decl_index: Decl.Index, fwd_decl: std.ArrayList(u8), error_msg: ?*Module.ErrorMsg, + ctypes: CType.Store, /// The key of this map is Type which has references to typedefs_arena. typedefs: TypedefMap, typedefs_arena: std.mem.Allocator, @@ -1949,6 +1954,10 @@ pub const DeclGen = struct { return name; } + fn typeToCType(dg: *DeclGen, ty: Type) !CType { + return dg.ctypes.typeToCType(dg.gpa, ty, dg.module); + } + /// Renders a type as a single identifier, generating intermediate typedefs /// if necessary. /// @@ -1967,6 +1976,8 @@ pub const DeclGen = struct { t: Type, kind: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { + _ = try dg.typeToCType(t); + const target = dg.module.getTarget(); switch (t.zigTypeTag()) { diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig new file mode 100644 index 0000000000..c9aca79458 --- /dev/null +++ b/src/codegen/c/type.zig @@ -0,0 +1,1457 @@ +const std = @import("std"); +const cstr = std.cstr; +const mem = std.mem; +const Allocator = mem.Allocator; +const assert = std.debug.assert; +const autoHash = std.hash.autoHash; +const Target = std.Target; + +const Module = @import("../../Module.zig"); +const Type = @import("../../type.zig").Type; + +pub const CType = extern union { + /// If the tag value is less than Tag.no_payload_count, then no pointer + /// dereference is needed. 
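+ /// Otherwise, `ptr_otherwise` is the active field: payload pointers and small
+ /// tags share the same `usize` representation in this extern union, and
+ /// `hasPayload`/`tag` below distinguish the two cases by comparing against
+ /// `Tag.no_payload_count`.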
+ tag_if_small_enough: Tag, + ptr_otherwise: *const Payload, + + pub fn initTag(small_tag: Tag) CType { + assert(!small_tag.hasPayload()); + return .{ .tag_if_small_enough = small_tag }; + } + + pub fn initPayload(pl: anytype) CType { + const T = @typeInfo(@TypeOf(pl)).Pointer.child; + return switch (pl.base.tag) { + inline else => |t| if (comptime t.hasPayload() and t.Type() == T) .{ + .ptr_otherwise = &pl.base, + } else unreachable, + }; + } + + pub fn hasPayload(self: CType) bool { + return self.tag_if_small_enough.hasPayload(); + } + + pub fn tag(self: CType) Tag { + return if (self.hasPayload()) self.ptr_otherwise.tag else self.tag_if_small_enough; + } + + pub fn cast(self: CType, comptime T: type) ?*const T { + if (!self.hasPayload()) return null; + const pl = self.ptr_otherwise; + return switch (pl.tag) { + inline else => |t| if (comptime t.hasPayload() and t.Type() == T) + @fieldParentPtr(T, "base", pl) + else + null, + }; + } + + pub fn castTag(self: CType, comptime t: Tag) ?*const t.Type() { + return if (self.tag() == t) @fieldParentPtr(t.Type(), "base", self.ptr_otherwise) else null; + } + + pub const Tag = enum(usize) { + // The first section of this enum are tags that require no payload. + void, + + // C basic types + char, + + @"signed char", + short, + int, + long, + @"long long", + + _Bool, + @"unsigned char", + @"unsigned short", + @"unsigned int", + @"unsigned long", + @"unsigned long long", + + float, + double, + @"long double", + + // C header types + bool, // stdbool.h + size_t, // stddef.h + ptrdiff_t, // stddef.h + + // zig.h types + zig_u8, + zig_i8, + zig_u16, + zig_i16, + zig_u32, + zig_i32, + zig_u64, + zig_i64, + zig_u128, + zig_i128, + zig_f16, + zig_f32, + zig_f64, + zig_f80, + zig_f128, + + // After this, the tag requires a payload. 
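+ // (`Tag.Type` below maps each of these tags to its payload struct.)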
+ pointer, + pointer_const, + pointer_volatile, + pointer_const_volatile, + array, + vector, + fwd_struct, + fwd_union, + anon_struct, + packed_anon_struct, + @"struct", + @"union", + packed_struct, + packed_union, + function, + varargs_function, + + pub const last_no_payload_tag = Tag.zig_f128; + pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; + + pub fn hasPayload(self: Tag) bool { + return @enumToInt(self) >= no_payload_count; + } + + pub fn toIndex(self: Tag) Index { + assert(!self.hasPayload()); + return @intCast(Index, @enumToInt(self)); + } + + pub fn Type(comptime self: Tag) type { + return switch (self) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .zig_u8, + .zig_i8, + .zig_u16, + .zig_i16, + .zig_u32, + .zig_i32, + .zig_u64, + .zig_i64, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => @compileError("Type Tag " ++ @tagName(self) ++ " has no payload"), + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => Payload.Child, + + .array, + .vector, + => Payload.Sequence, + + .fwd_struct, + .fwd_union, + => Payload.FwdDecl, + + .anon_struct, + .packed_anon_struct, + => Payload.Fields, + + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => Payload.Aggregate, + + .function, + .varargs_function, + => Payload.Function, + }; + } + }; + + pub const Payload = struct { + tag: Tag, + + pub const Child = struct { + base: Payload, + data: Index, + }; + + pub const Sequence = struct { + base: Payload, + data: struct { + len: u64, + elem_type: Index, + }, + }; + + pub const FwdDecl = struct { + base: Payload, + data: Module.Decl.Index, + }; + + pub const Fields = struct { + base: Payload, + data: Data, + + const Data = []const Field; + const Field = struct { + name: [*:0]const u8, + type: Index, + alignas: u32, + }; + }; + + pub const Aggregate = struct { + base: Payload, + data: struct { + fields: Fields.Data, + fwd_decl: Index, + }, + }; + + pub const Function = struct { + base: Payload, + data: struct { + return_type: Index, + param_types: []const Index, + }, + }; + }; + + pub const Index = u32; + pub const Store = struct { + arena: std.heap.ArenaAllocator.State = .{}, + set: Set = .{}, + + const Set = struct { + const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext32, true); + + map: Map = .{}, + + fn indexToCType(self: Set, index: Index) CType { + if (index < Tag.no_payload_count) return initTag(@intToEnum(Tag, index)); + return self.map.keys()[index - Tag.no_payload_count]; + } + + fn indexToHash(self: Set, index: Index) Map.Hash { + if (index < Tag.no_payload_count) return self.indexToCType(index).hash(self); + return self.map.entries.items(.hash)[index - Tag.no_payload_count]; + } + + fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index { + const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } }; + + var convert: Convert = undefined; + convert.initType(ty, kind, lookup) catch unreachable; + + const t = convert.tag(); + if (!t.hasPayload()) return t.toIndex(); + + return if (self.map.getIndexAdapted( + ty, + TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert }, + )) |idx| @intCast(Index, Tag.no_payload_count + idx) else null; + } + }; + + const Promoted = struct { + arena: std.heap.ArenaAllocator, + 
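/// Copied out of the owning `Store` by `promote` and written back by `demote`.
+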
set: Set, + + fn gpa(self: *Promoted) Allocator { + return self.arena.child_allocator; + } + + fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { + const t = cty.tag(); + if (@enumToInt(t) < Tag.no_payload_count) return @intCast(Index, @enumToInt(t)); + + const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set }); + if (!gop.found_existing) gop.key_ptr.* = cty; + if (std.debug.runtime_safety) { + const key = self.set.map.entries.items(.key)[gop.index]; + assert(key.eql(cty)); + assert(cty.hash(self.set) == key.hash(self.set)); + } + return @intCast(Index, Tag.no_payload_count + gop.index); + } + + fn typeToIndex(self: *Promoted, ty: Type, mod: *Module, kind: Kind) Allocator.Error!Index { + const lookup = Convert.Lookup{ .mut = .{ .promoted = self, .mod = mod } }; + + var convert: Convert = undefined; + try convert.initType(ty, kind, lookup); + + const t = convert.tag(); + if (!t.hasPayload()) return t.toIndex(); + + const gop = try self.set.map.getOrPutContextAdapted( + self.gpa(), + ty, + TypeAdapter32{ .kind = kind, .lookup = lookup.freeze(), .convert = &convert }, + .{ .store = &self.set }, + ); + if (!gop.found_existing) { + errdefer _ = self.set.map.pop(); + gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert); + } + if (std.debug.runtime_safety) { + const adapter = TypeAdapter64{ + .kind = kind, + .lookup = lookup.freeze(), + .convert = &convert, + }; + const key = self.set.map.entries.items(.key)[gop.index]; + assert(adapter.eql(ty, key)); + assert(adapter.hash(ty) == key.hash(self.set)); + } + return @intCast(Index, Tag.no_payload_count + gop.index); + } + }; + + fn promote(self: Store, gpa: Allocator) Promoted { + return .{ .arena = self.arena.promote(gpa), .set = self.set }; + } + + fn demote(self: *Store, promoted: Promoted) void { + self.arena = promoted.arena.state; + self.set = promoted.set; + } + + pub fn indexToCType(self: Store, index: Index) CType { + return self.set.indexToCType(index); + } + + pub fn cTypeToIndex(self: *Store, gpa: Allocator, cty: CType) !Index { + var promoted = self.promote(gpa); + defer self.demote(promoted); + return promoted.cTypeToIndex(cty); + } + + pub fn typeToCType(self: *Store, gpa: Allocator, ty: Type, mod: *Module) !CType { + const idx = try self.typeToIndex(gpa, ty, mod); + return self.indexToCType(idx); + } + + pub fn typeToIndex(self: *Store, gpa: Allocator, ty: Type, mod: *Module) !Index { + var promoted = self.promote(gpa); + defer self.demote(promoted); + return promoted.typeToIndex(ty, mod, .complete); + } + + pub fn clearRetainingCapacity(self: *Store, gpa: Allocator) void { + var promoted = self.promote(gpa); + defer self.demote(promoted); + promoted.set.map.clearRetainingCapacity(); + _ = promoted.arena.reset(.retain_capacity); + } + + pub fn shrinkToFit(self: *Store, gpa: Allocator) void { + self.map.shrinkAndFree(gpa, self.map.entries.len); + } + + pub fn shrinkAndFree(self: *Store, gpa: Allocator) void { + var promoted = self.promote(gpa); + defer self.demote(promoted); + promoted.set.map.clearAndFree(gpa); + _ = promoted.arena.reset(.free_all); + } + + pub fn move(self: *Store) Store { + const moved = self.*; + self.* = .{}; + return moved; + } + + pub fn deinit(self: *Store, gpa: Allocator) void { + var promoted = self.promote(gpa); + promoted.set.map.deinit(gpa); + _ = promoted.arena.deinit(); + self.* = undefined; + } + }; + + pub fn eql(lhs: CType, rhs: CType) bool { + // As a shortcut, if the small tags / addresses match, we're done. 
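+ // (Both union fields share one usize representation, so this single compare
+ // covers equal small tags and identical payload pointers alike.)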
+ if (lhs.tag_if_small_enough == rhs.tag_if_small_enough) return true; + + const lhs_tag = lhs.tag(); + const rhs_tag = rhs.tag(); + if (lhs_tag != rhs_tag) return false; + + return switch (lhs_tag) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .zig_u8, + .zig_i8, + .zig_u16, + .zig_i16, + .zig_u32, + .zig_i32, + .zig_u64, + .zig_i64, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => false, + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => lhs.cast(Payload.Child).?.data == rhs.cast(Payload.Child).?.data, + + .array, + .vector, + => std.meta.eql(lhs.cast(Payload.Sequence).?.data, rhs.cast(Payload.Sequence).?.data), + + .fwd_struct, + .fwd_union, + => lhs.cast(Payload.FwdDecl).?.data == rhs.cast(Payload.FwdDecl).?.data, + + .anon_struct, + .packed_anon_struct, + => { + const lhs_data = lhs.cast(Payload.Fields).?.data; + const rhs_data = rhs.cast(Payload.Fields).?.data; + if (lhs_data.len != rhs_data.len) return false; + for (lhs_data, rhs_data) |lhs_field, rhs_field| { + if (lhs_field.type != rhs_field.type) return false; + if (lhs_field.alignas != rhs_field.alignas) return false; + if (cstr.cmp(lhs_field.name, rhs_field.name) != 0) return false; + } + return true; + }, + + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => std.meta.eql( + lhs.cast(Payload.Aggregate).?.data.fwd_decl, + rhs.cast(Payload.Aggregate).?.data.fwd_decl, + ), + + .function, + .varargs_function, + => { + const lhs_data = lhs.cast(Payload.Function).?.data; + const rhs_data = rhs.cast(Payload.Function).?.data; + if (lhs_data.return_type != rhs_data.return_type) return false; + if (lhs_data.param_types.len != rhs_data.param_types.len) return false; + for (lhs_data.param_types, rhs_data.param_types) |lhs_param_cty, rhs_param_cty| { + if (lhs_param_cty != rhs_param_cty) return false; + } + return true; + }, + }; + } + + pub fn hash(self: CType, store: Store.Set) u64 { + var hasher = std.hash.Wyhash.init(0); + self.updateHasher(&hasher, store); + return hasher.final(); + } + + pub fn updateHasher(self: CType, hasher: anytype, store: Store.Set) void { + const t = self.tag(); + autoHash(hasher, t); + switch (t) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .zig_u8, + .zig_i8, + .zig_u16, + .zig_i16, + .zig_u32, + .zig_i32, + .zig_u64, + .zig_i64, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => {}, + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => store.indexToCType(self.cast(Payload.Child).?.data).updateHasher(hasher, store), + + .array, + .vector, + => { + const data = self.cast(Payload.Sequence).?.data; + autoHash(hasher, data.len); + store.indexToCType(data.elem_type).updateHasher(hasher, store); + }, + + .fwd_struct, + .fwd_union, + => autoHash(hasher, self.cast(Payload.FwdDecl).?.data), + + .anon_struct, + .packed_anon_struct, + => for (self.cast(Payload.Fields).?.data) |field| { + store.indexToCType(field.type).updateHasher(hasher, store); + hasher.update(mem.span(field.name)); + 
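// Field alignment participates in `eql`, so it must feed the hash as well.
+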
autoHash(hasher, field.alignas); + }, + + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => store.indexToCType(self.cast(Payload.Aggregate).?.data.fwd_decl) + .updateHasher(hasher, store), + + .function, + .varargs_function, + => { + const data = self.cast(Payload.Function).?.data; + store.indexToCType(data.return_type).updateHasher(hasher, store); + for (data.param_types) |param_ty| { + store.indexToCType(param_ty).updateHasher(hasher, store); + } + }, + } + } + + pub const Kind = enum { forward, complete, global, parameter }; + + const Convert = struct { + storage: union { + none: void, + child: Payload.Child, + seq: Payload.Sequence, + fwd: Payload.FwdDecl, + anon: struct { + fields: [2]Payload.Fields.Field, + pl: Payload.Fields, + }, + agg: Payload.Aggregate, + }, + value: union(enum) { + tag: Tag, + cty: CType, + }, + + pub fn init(self: *@This(), t: Tag) void { + self.* = if (t.hasPayload()) .{ + .storage = .{ .none = {} }, + .value = .{ .tag = t }, + } else .{ + .storage = .{ .none = {} }, + .value = .{ .cty = initTag(t) }, + }; + } + + pub fn tag(self: @This()) Tag { + return switch (self.value) { + .tag => |t| t, + .cty => |c| c.tag(), + }; + } + + fn tagFromIntInfo(signedness: std.builtin.Signedness, bits: u16) Tag { + return switch (bits) { + 0 => .void, + 1...8 => switch (signedness) { + .unsigned => .zig_u8, + .signed => .zig_i8, + }, + 9...16 => switch (signedness) { + .unsigned => .zig_u16, + .signed => .zig_i16, + }, + 17...32 => switch (signedness) { + .unsigned => .zig_u32, + .signed => .zig_i32, + }, + 33...64 => switch (signedness) { + .unsigned => .zig_u64, + .signed => .zig_i64, + }, + 65...128 => switch (signedness) { + .unsigned => .zig_u128, + .signed => .zig_i128, + }, + else => .array, + }; + } + + pub const Lookup = union(enum) { + fail: Target, + imm: struct { + set: *const Store.Set, + target: Target, + }, + mut: struct { + promoted: *Store.Promoted, + mod: *Module, + }, + + pub fn isMutable(self: @This()) bool { + return switch (self) { + .fail, .imm => false, + .mut => true, + }; + } + + pub fn getTarget(self: @This()) Target { + return switch (self) { + .fail => |target| target, + .imm => |imm| imm.target, + .mut => |mut| mut.mod.getTarget(), + }; + } + + pub fn getSet(self: @This()) ?*const Store.Set { + return switch (self) { + .fail => null, + .imm => |imm| imm.set, + .mut => |mut| &mut.promoted.set, + }; + } + + pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index { + return switch (self) { + .fail => null, + .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind), + .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind), + }; + } + + pub fn indexToCType(self: @This(), index: Index) ?CType { + return if (self.getSet()) |set| set.indexToCType(index) else null; + } + + pub fn freeze(self: @This()) @This() { + return switch (self) { + .fail, .imm => self, + .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } }, + }; + } + }; + + pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { + const target = lookup.getTarget(); + + self.* = undefined; + if (!ty.isFnOrHasRuntimeBitsIgnoreComptime()) + self.init(.void) + else if (ty.isAbiInt()) switch (ty.tag()) { + .usize => self.init(.size_t), + .isize => self.init(.ptrdiff_t), + .c_short => self.init(.short), + .c_ushort => self.init(.@"unsigned short"), + .c_int => self.init(.int), + .c_uint => self.init(.@"unsigned int"), + .c_long => self.init(.long), + .c_ulong => self.init(.@"unsigned long"), + .c_longlong => 
self.init(.@"long long"), + .c_ulonglong => self.init(.@"unsigned long long"), + else => { + const info = ty.intInfo(target); + const t = tagFromIntInfo(info.signedness, info.bits); + switch (t) { + .void => unreachable, + else => self.init(t), + .array => { + const abi_size = ty.abiSize(target); + const abi_align = ty.abiAlignment(target); + self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ + .len = @divExact(abi_size, abi_align), + .elem_type = tagFromIntInfo( + .unsigned, + @intCast(u16, abi_align * 8), + ).toIndex(), + } } }; + self.value = .{ .cty = initPayload(&self.storage.seq) }; + }, + } + }, + } else switch (ty.zigTypeTag()) { + .Frame => unreachable, + .AnyFrame => unreachable, + + .Int, + .Enum, + .ErrorSet, + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => unreachable, + + .Bool => self.init(.bool), + + .Float => self.init(switch (ty.tag()) { + .f16 => .zig_f16, + .f32 => .zig_f32, + .f64 => .zig_f64, + .f80 => .zig_f80, + .f128 => .zig_f128, + .c_longdouble => .@"long double", + else => unreachable, + }), + + .Pointer => switch (ty.ptrSize()) { + .Slice => { + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const ptr_ty = ty.slicePtrFieldType(&buf); + if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { + self.storage = .{ .anon = .{ .fields = .{ + .{ + .name = "ptr", + .type = ptr_idx, + .alignas = ptr_ty.abiAlignment(target), + }, + .{ + .name = "len", + .type = Tag.size_t.toIndex(), + .alignas = Type.usize.abiAlignment(target), + }, + }, .pl = undefined } }; + self.storage.anon.pl = .{ + .base = .{ .tag = .anon_struct }, + .data = self.storage.anon.fields[0..2], + }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl) }; + } else self.init(.anon_struct); + }, + + .One, .Many, .C => { + const t: Tag = switch (ty.isVolatilePtr()) { + false => switch (ty.isConstPtr()) { + false => .pointer, + true => .pointer_const, + }, + true => switch (ty.isConstPtr()) { + false => .pointer_volatile, + true => .pointer_const_volatile, + }, + }; + if (try lookup.typeToIndex(ty.childType(), .forward)) |child_idx| { + self.storage = .{ .child = .{ .base = .{ .tag = t }, .data = child_idx } }; + self.value = .{ .cty = initPayload(&self.storage.child) }; + } else self.init(t); + }, + }, + + .Struct, .Union => |zig_tag| if (ty.isTupleOrAnonStruct()) { + if (lookup.isMutable()) { + for (0..ty.structFieldCount()) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + _ = try lookup.typeToIndex(ty.structFieldType(field_i), switch (kind) { + .forward, .complete, .parameter => .complete, + .global => .global, + }); + } + } + self.init(.anon_struct); + } else { + const is_struct = zig_tag == .Struct or ty.unionTagTypeSafety() != null; + switch (kind) { + .forward => { + self.storage = .{ .fwd = .{ + .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union }, + .data = ty.getOwnerDecl(), + } }; + self.value = .{ .cty = initPayload(&self.storage.fwd) }; + }, + else => { + if (lookup.isMutable()) { + for (0..switch (zig_tag) { + .Struct => ty.structFieldCount(), + .Union => ty.cast(Type.Payload.Union).?.data.fields.count(), + else => unreachable, + }) |field_i| { + if (zig_tag == .Struct and ty.structFieldIsComptime(field_i)) + continue; + _ = try lookup.typeToIndex( + ty.structFieldType(field_i), + switch (kind) { + .forward => unreachable, + .complete, .parameter => .complete, + .global => .global, + }, + ); + } + _ = try lookup.typeToIndex(ty, .forward); + } + self.init(if (is_struct) .@"struct" 
else .@"union"); + }, + } + }, + + .Array, .Vector => |zig_tag| { + const t: Tag = switch (zig_tag) { + .Array => .array, + .Vector => .vector, + else => unreachable, + }; + if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { + self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ + .len = ty.arrayLenIncludingSentinel(), + .elem_type = child_idx, + } } }; + self.value = .{ .cty = initPayload(&self.storage.seq) }; + } else self.init(t); + }, + + .Optional => { + var buf: Type.Payload.ElemType = undefined; + const payload_ty = ty.optionalChild(&buf); + if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (ty.optionalReprIsPayload()) + try self.initType(payload_ty, kind, lookup) + else if (try lookup.typeToIndex(payload_ty, kind)) |payload_idx| { + self.storage = .{ .anon = .{ .fields = .{ + .{ + .name = "payload", + .type = payload_idx, + .alignas = payload_ty.abiAlignment(target), + }, + .{ + .name = "is_null", + .type = Tag.bool.toIndex(), + .alignas = Type.bool.abiAlignment(target), + }, + }, .pl = undefined } }; + self.storage.anon.pl = .{ + .base = .{ .tag = .anon_struct }, + .data = self.storage.anon.fields[0..2], + }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl) }; + } else self.init(.anon_struct); + } else self.init(.bool); + }, + + .ErrorUnion => { + const payload_ty = ty.errorUnionPayload(); + if (try lookup.typeToIndex(payload_ty, switch (kind) { + .forward, .complete, .parameter => .complete, + .global => .global, + })) |payload_idx| { + const error_ty = ty.errorUnionSet(); + if (payload_idx == Tag.void.toIndex()) + try self.initType(error_ty, kind, lookup) + else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { + self.storage = .{ .anon = .{ .fields = .{ + .{ + .name = "payload", + .type = payload_idx, + .alignas = payload_ty.abiAlignment(target), + }, + .{ + .name = "error", + .type = error_idx, + .alignas = error_ty.abiAlignment(target), + }, + }, .pl = undefined } }; + self.storage.anon.pl = .{ + .base = .{ .tag = .anon_struct }, + .data = self.storage.anon.fields[0..2], + }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl) }; + } else self.init(.anon_struct); + } else self.init(.anon_struct); + }, + + .Opaque => switch (ty.tag()) { + .anyopaque => self.init(.void), + .@"opaque" => { + self.storage = .{ .fwd = .{ + .base = .{ .tag = .fwd_struct }, + .data = ty.getOwnerDecl(), + } }; + self.value = .{ .cty = initPayload(&self.storage.fwd) }; + }, + else => unreachable, + }, + + .Fn => { + const info = ty.fnInfo(); + if (lookup.isMutable()) { + _ = try lookup.typeToIndex(info.return_type, switch (kind) { + .forward => .forward, + .complete, .parameter, .global => .complete, + }); + for (info.param_types, 0..) 
|param_ty, param_i| { + if (info.paramIsComptime(param_i)) continue; + _ = try lookup.typeToIndex(param_ty, switch (kind) { + .forward => .forward, + .complete, .parameter, .global => unreachable, + }); + } + } + self.init(if (info.is_var_args) .varargs_function else .function); + }, + } + } + }; + + fn copyFields(arena: Allocator, fields: Payload.Fields.Data) !Payload.Fields.Data { + const new_fields = try arena.dupe(Payload.Fields.Field, fields); + for (new_fields) |*new_field| { + new_field.name = try arena.dupeZ(u8, mem.span(new_field.name)); + new_field.type = new_field.type; + } + return new_fields; + } + + pub fn copy(self: CType, arena: Allocator) !CType { + switch (self.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .zig_u8, + .zig_i8, + .zig_u16, + .zig_i16, + .zig_u32, + .zig_i32, + .zig_u64, + .zig_i64, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => return self, + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => { + const pl = self.cast(Payload.Child).?; + const new_pl = try arena.create(Payload.Child); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = pl.data }; + return initPayload(new_pl); + }, + + .array, + .vector, + => { + const pl = self.cast(Payload.Sequence).?; + const new_pl = try arena.create(Payload.Sequence); + new_pl.* = .{ + .base = .{ .tag = pl.base.tag }, + .data = .{ .len = pl.data.len, .elem_type = pl.data.elem_type }, + }; + return initPayload(new_pl); + }, + + .fwd_struct, + .fwd_union, + => { + const pl = self.cast(Payload.FwdDecl).?; + const new_pl = try arena.create(Payload.FwdDecl); + new_pl.* = .{ + .base = .{ .tag = pl.base.tag }, + .data = pl.data, + }; + return initPayload(new_pl); + }, + + .anon_struct, + .packed_anon_struct, + => { + const pl = self.cast(Payload.Fields).?; + const new_pl = try arena.create(Payload.Fields); + new_pl.* = .{ + .base = .{ .tag = pl.base.tag }, + .data = try copyFields(arena, pl.data), + }; + return initPayload(new_pl); + }, + + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const pl = self.cast(Payload.Aggregate).?; + const new_pl = try arena.create(Payload.Aggregate); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ + .fields = try copyFields(arena, pl.data.fields), + .fwd_decl = pl.data.fwd_decl, + } }; + return initPayload(new_pl); + }, + + .function, + .varargs_function, + => { + const pl = self.cast(Payload.Function).?; + const new_pl = try arena.create(Payload.Function); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ + .return_type = pl.data.return_type, + .param_types = try arena.dupe(Index, pl.data.param_types), + } }; + return initPayload(new_pl); + }, + } + } + + fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType { + var convert: Convert = undefined; + try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } }); + return createFromConvert(store, ty, target, kind, &convert); + } + + fn createFromConvert( + store: *Store.Promoted, + ty: Type, + target: Target, + kind: Kind, + convert: Convert, + ) !CType { + const arena = store.arena.allocator(); + switch (convert.value) { + .cty => |c| return c.copy(arena), + .tag => |t| switch (t) { + .anon_struct, + .packed_anon_struct, + .@"struct", 
+ .@"union", + .packed_struct, + .packed_union, + => switch (ty.zigTypeTag()) { + .Struct => { + const fields_len = ty.structFieldCount(); + + var c_fields_len: usize = 0; + for (0..fields_len) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + c_fields_len += 1; + } + + const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); + var c_field_i: usize = 0; + for (0..fields_len) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + + fields_pl[c_field_i] = .{ + .name = try if (ty.isSimpleTuple()) + std.fmt.allocPrintZ(arena, "f{}", .{field_i}) + else + arena.dupeZ(u8, ty.structFieldName(field_i)), + .type = store.set.typeToIndex( + ty.structFieldType(field_i), + target, + switch (kind) { + .forward, .complete, .parameter => .complete, + .global => .global, + }, + ).?, + .alignas = ty.structFieldAlign(field_i, target), + }; + c_field_i += 1; + } + + if (ty.isTupleOrAnonStruct()) { + const anon_pl = try arena.create(Payload.Fields); + anon_pl.* = .{ .base = .{ .tag = .anon_struct }, .data = fields_pl }; + return initPayload(anon_pl); + } + + const struct_pl = try arena.create(Payload.Aggregate); + struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(struct_pl); + }, + + .Union => { + const fields = ty.unionFields(); + const fields_len = fields.count(); + + var c_fields_len: usize = 0; + for (0..fields_len) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + c_fields_len += 1; + } + + const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); + var field_i: usize = 0; + var c_field_i: usize = 0; + var field_it = fields.iterator(); + while (field_it.next()) |field| { + defer field_i += 1; + if (!field.value_ptr.ty.hasRuntimeBitsIgnoreComptime()) continue; + + fields_pl[c_field_i] = .{ + .name = try arena.dupeZ(u8, field.key_ptr.*), + .type = store.set.typeToIndex(field.value_ptr.ty, target, switch (kind) { + .forward => unreachable, + .complete, .parameter => .complete, + .global => .global, + }).?, + .alignas = ty.structFieldAlign(field_i, target), + }; + c_field_i += 1; + } + + const union_pl = try arena.create(Payload.Aggregate); + union_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(union_pl); + }, + + else => unreachable, + }, + + .function, + .varargs_function, + => { + const info = ty.fnInfo(); + const recurse_kind: Kind = switch (kind) { + .forward => .forward, + .complete, .parameter, .global => unreachable, + }; + + var c_params_len: usize = 0; + for (0..info.param_types.len) |param_i| { + if (info.paramIsComptime(param_i)) continue; + c_params_len += 1; + } + + const params_pl = try arena.alloc(Index, c_params_len); + var c_param_i: usize = 0; + for (info.param_types, 0..) 
|param_ty, param_i| { + if (info.paramIsComptime(param_i)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_ty, target, recurse_kind).?; + c_param_i += 1; + } + + const fn_pl = try arena.create(Payload.Function); + fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .return_type = store.set.typeToIndex(info.return_type, target, recurse_kind).?, + .param_types = params_pl, + } }; + return initPayload(fn_pl); + }, + + else => unreachable, + }, + } + } + + pub const HashContext64 = struct { + store: *const Store.Set, + + pub fn hash(_: @This(), cty: CType) u64 { + return cty.hash(); + } + pub fn eql(_: @This(), lhs: CType, rhs: CType) bool { + return lhs.eql(rhs); + } + }; + + pub const HashContext32 = struct { + store: *const Store.Set, + + pub fn hash(self: @This(), cty: CType) u32 { + return @truncate(u32, cty.hash(self.store.*)); + } + pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool { + return lhs.eql(rhs); + } + }; + + pub const TypeAdapter64 = struct { + kind: Kind, + lookup: Convert.Lookup, + convert: *const Convert, + + fn eqlRecurse(self: @This(), ty: Type, cty: Index, kind: Kind) bool { + assert(!self.lookup.isMutable()); + + var convert: Convert = undefined; + convert.initType(ty, kind, self.lookup) catch unreachable; + + const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert }; + return self_recurse.eql(ty, self.lookup.indexToCType(cty).?); + } + + pub fn eql(self: @This(), ty: Type, cty: CType) bool { + switch (self.convert.value) { + .cty => |c| return c.eql(cty), + .tag => |t| { + if (t != cty.tag()) return false; + + const target = self.lookup.getTarget(); + switch (t) { + .anon_struct, + .packed_anon_struct, + => { + if (!ty.isTupleOrAnonStruct()) return false; + + var name_buf: [ + std.fmt.count("f{}", .{std.math.maxInt(usize)}) + ]u8 = undefined; + const c_fields = cty.cast(Payload.Fields).?.data; + + var c_field_i: usize = 0; + for (0..ty.structFieldCount()) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + + const c_field = &c_fields[c_field_i]; + c_field_i += 1; + + if (!self.eqlRecurse( + ty.structFieldType(field_i), + c_field.type, + switch (self.kind) { + .forward, .complete, .parameter => .complete, + .global => .global, + }, + ) or !mem.eql( + u8, + if (ty.isSimpleTuple()) + std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable + else + ty.structFieldName(field_i), + mem.span(c_field.name), + ) or ty.structFieldAlign(field_i, target) != c_field.alignas) + return false; + } + return true; + }, + + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => return self.eqlRecurse( + ty, + cty.cast(Payload.Aggregate).?.data.fwd_decl, + .forward, + ), + + .function, + .varargs_function, + => { + if (ty.zigTypeTag() != .Fn) return false; + + const info = ty.fnInfo(); + const data = cty.cast(Payload.Function).?.data; + const recurse_kind: Kind = switch (self.kind) { + .forward => .forward, + .complete, .parameter, .global => unreachable, + }; + + if (info.param_types.len != data.param_types.len or + !self.eqlRecurse(info.return_type, data.return_type, recurse_kind)) + return false; + for (info.param_types, data.param_types, 0..) 
|param_ty, param_cty, param_i| { + if (info.paramIsComptime(param_i)) continue; + if (!self.eqlRecurse(param_ty, param_cty, recurse_kind)) + return false; + } + return true; + }, + + else => unreachable, + } + }, + } + } + + pub fn hash(self: @This(), ty: Type) u64 { + var hasher = std.hash.Wyhash.init(0); + self.updateHasher(&hasher, ty); + return hasher.final(); + } + + fn updateHasherRecurse(self: @This(), hasher: anytype, ty: Type, kind: Kind) void { + assert(!self.lookup.isMutable()); + + var convert: Convert = undefined; + convert.initType(ty, kind, self.lookup) catch unreachable; + + const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert }; + self_recurse.updateHasher(hasher, ty); + } + + pub fn updateHasher(self: @This(), hasher: anytype, ty: Type) void { + switch (self.convert.value) { + .cty => |c| return c.updateHasher(hasher, self.lookup.getSet().?.*), + .tag => |t| { + autoHash(hasher, t); + + const target = self.lookup.getTarget(); + switch (t) { + .anon_struct, + .packed_anon_struct, + => { + var name_buf: [ + std.fmt.count("f{}", .{std.math.maxInt(usize)}) + ]u8 = undefined; + for (0..ty.structFieldCount()) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + + self.updateHasherRecurse( + hasher, + ty.structFieldType(field_i), + switch (self.kind) { + .forward, .complete, .parameter => .complete, + .global => .global, + }, + ); + hasher.update(if (ty.isSimpleTuple()) + std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable + else + ty.structFieldName(field_i)); + autoHash(hasher, ty.structFieldAlign(field_i, target)); + } + }, + + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => self.updateHasherRecurse(hasher, ty, .forward), + + .function, + .varargs_function, + => { + const info = ty.fnInfo(); + const recurse_kind: Kind = switch (self.kind) { + .forward => .forward, + .complete, .parameter, .global => unreachable, + }; + + self.updateHasherRecurse(hasher, info.return_type, recurse_kind); + for (info.param_types, 0..) |param_ty, param_i| { + if (info.paramIsComptime(param_i)) continue; + self.updateHasherRecurse(hasher, param_ty, recurse_kind); + } + }, + + else => unreachable, + } + }, + } + } + }; + + pub const TypeAdapter32 = struct { + kind: Kind, + lookup: Convert.Lookup, + convert: *const Convert, + + fn to64(self: @This()) TypeAdapter64 { + return .{ .kind = self.kind, .lookup = self.lookup, .convert = self.convert }; + } + + pub fn eql(self: @This(), ty: Type, cty: CType, cty_index: usize) bool { + _ = cty_index; + return self.to64().eql(ty, cty); + } + + pub fn hash(self: @This(), ty: Type) u32 { + return @truncate(u32, self.to64().hash(ty)); + } + }; +}; -- cgit v1.2.3 From 7768d2024bfbb4aad143fb8a4143e324445bfd93 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 17 Feb 2023 05:00:17 -0500 Subject: CBE: use CType for type rendering --- src/codegen/c.zig | 574 +++++++++++++++++++++++++++--------------------------- 1 file changed, 287 insertions(+), 287 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 872d5fd344..aa540d6984 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1954,291 +1954,311 @@ pub const DeclGen = struct { return name; } + fn indexToCType(dg: *DeclGen, idx: CType.Index) CType { + return dg.ctypes.indexToCType(idx); + } fn typeToCType(dg: *DeclGen, ty: Type) !CType { return dg.ctypes.typeToCType(dg.gpa, ty, dg.module); } - - /// Renders a type as a single identifier, generating intermediate typedefs - /// if necessary. 
- /// - /// This is guaranteed to be valid in both typedefs and declarations/definitions. - /// - /// There are three type formats in total that we support rendering: - /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | - /// |---------------------|-----------------|---------------------| - /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | - /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" | - /// - fn renderType( + fn typeToIndex(dg: *DeclGen, ty: Type) !CType.Index { + return dg.ctypes.typeToIndex(dg.gpa, ty, dg.module); + } + + const CTypeFix = enum { prefix, suffix }; + const CQualifiers = std.enums.EnumSet(enum { @"const", @"volatile", restrict }); + const CTypeRenderTrailing = enum { + no_space, + maybe_space, + + pub fn format( + self: @This(), + comptime fmt: []const u8, + _: std.fmt.FormatOptions, + w: anytype, + ) @TypeOf(w).Error!void { + if (fmt.len != 0) + @compileError("invalid format string '" ++ fmt ++ "' for type '" ++ + @typeName(@This()) ++ "'"); + comptime assert(fmt.len == 0); + switch (self) { + .no_space => {}, + .maybe_space => try w.writeByte(' '), + } + } + }; + fn renderTypePrefix( dg: *DeclGen, w: anytype, - t: Type, - kind: TypedefKind, - ) error{ OutOfMemory, AnalysisFail }!void { - _ = try dg.typeToCType(t); - - const target = dg.module.getTarget(); - - switch (t.zigTypeTag()) { - .Void => try w.writeAll("void"), - .Bool => try w.writeAll("bool"), - .NoReturn, .Float => { - try w.writeAll("zig_"); - try t.print(w, dg.module); - }, - .Int => { - if (t.isNamedInt()) { - try w.writeAll("zig_"); - try t.print(w, dg.module); - } else { - return renderTypeUnnamed(dg, w, t, kind); - } - }, - .ErrorSet => { - return renderTypeUnnamed(dg, w, t, kind); + idx: CType.Index, + parent_fix: CTypeFix, + qualifiers: CQualifiers, + ) @TypeOf(w).Error!CTypeRenderTrailing { + var trailing = CTypeRenderTrailing.maybe_space; + + const cty = dg.indexToCType(idx); + switch (cty.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .zig_u8, + .zig_i8, + .zig_u16, + .zig_i16, + .zig_u32, + .zig_i32, + .zig_u64, + .zig_i64, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => |tag| try w.writeAll(@tagName(tag)), + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => |tag| { + const child_idx = cty.cast(CType.Payload.Child).?.data; + try w.print("{}*", .{try dg.renderTypePrefix(w, child_idx, .prefix, CQualifiers.init(.{ + .@"const" = switch (tag) { + .pointer, .pointer_volatile => false, + .pointer_const, .pointer_const_volatile => true, + else => unreachable, + }, + .@"volatile" = switch (tag) { + .pointer, .pointer_const => false, + .pointer_volatile, .pointer_const_volatile => true, + else => unreachable, + }, + }))}); + trailing = .no_space; }, - .Pointer => { - const ptr_info = t.ptrInfo().data; - if (ptr_info.size == .Slice) { - var slice_pl = Type.Payload.ElemType{ - .base = .{ .tag = if (t.ptrIsMutable()) .mut_slice else .const_slice }, - .data = ptr_info.pointee_type, - }; - const slice_ty = Type.initPayload(&slice_pl.base); - - const name = dg.getTypedefName(slice_ty) orelse - try dg.renderSliceTypedef(slice_ty); - return w.writeAll(name); - } - - if (ptr_info.pointee_type.zigTypeTag() == .Fn) { 
- const name = dg.getTypedefName(ptr_info.pointee_type) orelse - try dg.renderPtrToFnTypedef(ptr_info.pointee_type); - - return w.writeAll(name); + .array, + .vector, + => { + const child_idx = cty.cast(CType.Payload.Sequence).?.data.elem_type; + const child_trailing = try dg.renderTypePrefix(w, child_idx, .suffix, qualifiers); + switch (parent_fix) { + .prefix => { + try w.print("{}(", .{child_trailing}); + return .no_space; + }, + .suffix => return child_trailing, } - - if (ptr_info.host_size != 0) { - var host_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); - - try dg.renderType(w, host_ty, .Forward); - } else if (t.isCPtr() and ptr_info.pointee_type.eql(Type.u8, dg.module) and - (dg.decl.val.tag() == .extern_fn or - std.mem.eql(u8, std.mem.span(dg.decl.name), "main"))) - { - // This is a hack, since the c compiler expects a lot of external - // library functions to have char pointers in their signatures, but - // u8 and i8 produce unsigned char and signed char respectively, - // which in C are (not very usefully) different than char. - try w.writeAll("char"); - } else try dg.renderType(w, switch (ptr_info.pointee_type.tag()) { - .anyopaque => Type.void, - else => ptr_info.pointee_type, - }, .Forward); - if (t.isConstPtr()) try w.writeAll(" const"); - if (t.isVolatilePtr()) try w.writeAll(" volatile"); - return w.writeAll(" *"); - }, - .Array, .Vector => { - var array_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ - .len = t.arrayLenIncludingSentinel(), - .elem_type = t.childType(), - } }; - const array_ty = Type.initPayload(&array_pl.base); - - const name = dg.getTypedefName(array_ty) orelse - try dg.renderArrayTypedef(array_ty); - - return w.writeAll(name); }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&opt_buf); - - if (!child_ty.hasRuntimeBitsIgnoreComptime()) - return dg.renderType(w, Type.bool, kind); - - if (t.optionalReprIsPayload()) - return dg.renderType(w, child_ty, kind); - switch (kind) { - .Complete => { - const name = dg.getTypedefName(t) orelse - try dg.renderOptionalTypedef(t); - - try w.writeAll(name); - }, - .Forward => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = t, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); + .fwd_struct, + .fwd_union, + .anon_struct, + .packed_anon_struct, + => |tag| try w.print("{s} {}__{d}", .{ + switch (tag) { + .fwd_struct, + .anon_struct, + .packed_anon_struct, + => "struct", + .fwd_union => "union", + else => unreachable, + }, + fmtIdent(switch (tag) { + .fwd_struct, + .fwd_union, + => mem.span(dg.module.declPtr(cty.cast(CType.Payload.FwdDecl).?.data).name), + .anon_struct, + .packed_anon_struct, + => "anon", + else => unreachable, + }), + idx, + }), - try w.writeAll(name); + .@"struct", + .packed_struct, + .@"union", + .packed_union, + => return dg.renderTypePrefix( + w, + cty.cast(CType.Payload.Aggregate).?.data.fwd_decl, + parent_fix, + qualifiers, + ), + + .function, + .varargs_function, + => { + const child_trailing = try dg.renderTypePrefix( + w, + cty.cast(CType.Payload.Function).?.data.return_type, + .suffix, + CQualifiers.initEmpty(), + ); + switch (parent_fix) { + .prefix => { + try w.print("{}(", .{child_trailing}); + return .no_space; }, + .suffix => return child_trailing, } }, - .ErrorUnion => { - const 
payload_ty = t.errorUnionPayload(); + } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) - return dg.renderType(w, Type.anyerror, kind); + var qualifier_it = qualifiers.iterator(); + while (qualifier_it.next()) |qualifier| { + try w.print("{}{s}", .{ trailing, @tagName(qualifier) }); + trailing = .maybe_space; + } - var error_union_pl = Type.Payload.ErrorUnion{ - .data = .{ .error_set = Type.anyerror, .payload = payload_ty }, - }; - const error_union_ty = Type.initPayload(&error_union_pl.base); + return trailing; + } + fn renderTypeSuffix( + dg: *DeclGen, + w: anytype, + idx: CType.Index, + parent_fix: CTypeFix, + ) @TypeOf(w).Error!void { + const cty = dg.indexToCType(idx); + switch (cty.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .zig_u8, + .zig_i8, + .zig_u16, + .zig_i16, + .zig_u32, + .zig_i32, + .zig_u64, + .zig_i64, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => {}, + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => try dg.renderTypeSuffix(w, cty.cast(CType.Payload.Child).?.data, .prefix), + + .array, + .vector, + => { + switch (parent_fix) { + .prefix => try w.writeByte(')'), + .suffix => {}, + } - switch (kind) { - .Complete => { - const name = dg.getTypedefName(error_union_ty) orelse - try dg.renderErrorUnionTypedef(error_union_ty); + try w.print("[{}]", .{cty.cast(CType.Payload.Sequence).?.data.len}); + try dg.renderTypeSuffix(w, cty.cast(CType.Payload.Sequence).?.data.elem_type, .suffix); + }, - try w.writeAll(name); - }, - .Forward => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = error_union_ty, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); + .fwd_struct, + .fwd_union, + .anon_struct, + .packed_anon_struct, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => {}, + + .function, + .varargs_function, + => |tag| { + switch (parent_fix) { + .prefix => try w.writeByte(')'), + .suffix => {}, + } - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); + const data = cty.cast(CType.Payload.Function).?.data; - try w.writeAll(name); - }, + try w.writeByte('('); + var need_comma = false; + for (data.param_types) |param_type| { + if (need_comma) try w.writeAll(", "); + need_comma = true; + _ = try dg.renderTypePrefix(w, param_type, .suffix, CQualifiers.initEmpty()); + try dg.renderTypeSuffix(w, param_type, .suffix); } - }, - .Struct, .Union => |tag| if (t.containerLayout() == .Packed) { - if (t.castTag(.@"struct")) |struct_obj| { - try dg.renderType(w, struct_obj.data.backing_int_ty, kind); - } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, t.bitSize(target)), - }; - try dg.renderType(w, Type.initPayload(&buf.base), kind); + switch (tag) { + .function => {}, + .varargs_function => { + if (need_comma) try w.writeAll(", "); + need_comma = true; + try w.writeAll("..."); + }, + else => unreachable, } - } else if (t.isSimpleTupleOrAnonStruct()) { - const ExpectedContents = struct { types: [8]Type, values: [8]Value }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), dg.gpa); - const allocator = stack.get(); - - var tuple_storage = std.MultiArrayList(struct { type: Type, value: 
Value }){}; - defer tuple_storage.deinit(allocator); - try tuple_storage.ensureTotalCapacity(allocator, t.structFieldCount()); - - const fields = t.tupleFields(); - for (fields.values, 0..) |value, index| - if (value.tag() == .unreachable_value) - tuple_storage.appendAssumeCapacity(.{ - .type = fields.types[index], - .value = value, - }); - - const tuple_slice = tuple_storage.slice(); - var tuple_pl = Type.Payload.Tuple{ .data = .{ - .types = tuple_slice.items(.type), - .values = tuple_slice.items(.value), - } }; - const tuple_ty = Type.initPayload(&tuple_pl.base); - - const name = dg.getTypedefName(tuple_ty) orelse - try dg.renderTupleTypedef(tuple_ty); - - try w.writeAll(name); - } else switch (kind) { - .Complete => { - const name = dg.getTypedefName(t) orelse switch (tag) { - .Struct => try dg.renderStructTypedef(t), - .Union => try dg.renderUnionTypedef(t), - else => unreachable, - }; - - try w.writeAll(name); - }, - .Forward => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = t, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - try w.writeAll(name); - }, - }, - .Enum => { - // For enums, we simply use the integer tag type. - var int_tag_buf: Type.Payload.Bits = undefined; - const int_tag_ty = t.intTagType(&int_tag_buf); + if (!need_comma) try w.writeAll("void"); + try w.writeByte(')'); - try dg.renderType(w, int_tag_ty, kind); + try dg.renderTypeSuffix(w, data.return_type, .suffix); }, - .Opaque => switch (t.tag()) { - .@"opaque" => { - const name = dg.getTypedefName(t) orelse - try dg.renderOpaqueTypedef(t); - - try w.writeAll(name); - }, - else => unreachable, - }, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), - - .Fn => unreachable, // This is a function body, not a function pointer. - - .Null, - .Undefined, - .EnumLiteral, - .ComptimeFloat, - .ComptimeInt, - .Type, - => unreachable, // must be const or comptime } } - fn renderTypeUnnamed( + /// Renders a type as a single identifier, generating intermediate typedefs + /// if necessary. + /// + /// This is guaranteed to be valid in both typedefs and declarations/definitions. 
+ /// + /// There are three type formats in total that we support rendering: + /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | + /// |---------------------|-----------------|---------------------| + /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | + /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | + /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | + /// + fn renderType( dg: *DeclGen, w: anytype, t: Type, - kind: TypedefKind, + _: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - const int_info = t.intInfo(target); - if (toCIntBits(int_info.bits)) |c_bits| - return w.print("zig_{c}{d}", .{ signAbbrev(int_info.signedness), c_bits }) - else if (loweredArrayInfo(t, target)) |array_info| { - assert(array_info.sentinel == null); - var array_pl = Type.Payload.Array{ - .base = .{ .tag = .array }, - .data = .{ .len = array_info.len, .elem_type = array_info.elem_type }, - }; - const array_ty = Type.initPayload(&array_pl.base); - - return dg.renderType(w, array_ty, kind); - } else return dg.fail("C backend: Unable to lower unnamed integer type {}", .{ - t.fmt(dg.module), - }); + const idx = try dg.typeToIndex(t); + _ = try dg.renderTypePrefix(w, idx, .suffix, CQualifiers.initEmpty()); + try dg.renderTypeSuffix(w, idx, .suffix); } const IntCastContext = union(enum) { @@ -2348,10 +2368,10 @@ pub const DeclGen = struct { /// |---------------------|-----------------|---------------------| /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" | + /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// fn renderTypecast(dg: *DeclGen, w: anytype, ty: Type) error{ OutOfMemory, AnalysisFail }!void { - return renderTypeAndName(dg, w, ty, .{ .bytes = "" }, .Mut, 0, .Complete); + try dg.renderType(w, ty, undefined); } /// Renders a type and name in field declaration/definition format. 
@@ -2361,7 +2381,7 @@ pub const DeclGen = struct { /// |---------------------|-----------------|---------------------| /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" | + /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// fn renderTypeAndName( dg: *DeclGen, @@ -2370,46 +2390,26 @@ pub const DeclGen = struct { name: CValue, mutability: Mutability, alignment: u32, - kind: TypedefKind, + _: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { - var suffix = std.ArrayList(u8).init(dg.gpa); - defer suffix.deinit(); - const suffix_writer = suffix.writer(); - - // Any top-level array types are rendered here as a suffix, which - // avoids creating typedefs for every array type - const target = dg.module.getTarget(); - var render_ty = ty; - var depth: u32 = 0; - while (loweredArrayInfo(render_ty, target)) |array_info| { - const c_len = array_info.len + @boolToInt(array_info.sentinel != null); - var c_len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = c_len }; - const c_len_val = Value.initPayload(&c_len_pl.base); - - try suffix_writer.writeByte('['); - if (mutability == .ConstArgument and depth == 0) try suffix_writer.writeAll("zig_const_arr "); - try suffix.writer().print("{}]", .{try dg.fmtIntLiteral(Type.usize, c_len_val)}); - render_ty = array_info.elem_type; - depth += 1; - } - if (alignment != 0) { - const abi_alignment = ty.abiAlignment(target); + const abi_alignment = ty.abiAlignment(dg.module.getTarget()); if (alignment < abi_alignment) { try w.print("zig_under_align({}) ", .{alignment}); } else if (alignment > abi_alignment) { try w.print("zig_align({}) ", .{alignment}); } } - try dg.renderType(w, render_ty, kind); - const const_prefix = switch (mutability) { - .Const, .ConstArgument => "const ", - .Mut => "", - }; - try w.print(" {s}", .{const_prefix}); + const idx = try dg.typeToIndex(ty); + try w.print("{}", .{try dg.renderTypePrefix(w, idx, .suffix, CQualifiers.init(.{ + .@"const" = switch (mutability) { + .Const, .ConstArgument => true, + .Mut => false, + }, + }))}); try dg.writeCValue(w, name); - try w.writeAll(suffix.items); + try dg.renderTypeSuffix(w, idx, .suffix); } fn renderTagNameFn(dg: *DeclGen, enum_ty: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { -- cgit v1.2.3 From d513792afa4893c21d5a9635c61d8e41689d9541 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 17 Feb 2023 05:33:47 -0500 Subject: CBE: fix comptime checks --- src/codegen/c/type.zig | 80 ++++++++++++++++++++++++++------------------------ 1 file changed, 41 insertions(+), 39 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index c9aca79458..71132b5a97 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -817,8 +817,10 @@ pub const CType = extern union { .Struct, .Union => |zig_tag| if (ty.isTupleOrAnonStruct()) { if (lookup.isMutable()) { for (0..ty.structFieldCount()) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - _ = try lookup.typeToIndex(ty.structFieldType(field_i), switch (kind) { + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .complete, .parameter => .complete, .global => .global, }); @@ -842,16 +844,13 @@ pub const CType = extern union { .Union => 
ty.cast(Type.Payload.Union).?.data.fields.count(), else => unreachable, }) |field_i| { - if (zig_tag == .Struct and ty.structFieldIsComptime(field_i)) - continue; - _ = try lookup.typeToIndex( - ty.structFieldType(field_i), - switch (kind) { - .forward => unreachable, - .complete, .parameter => .complete, - .global => .global, - }, - ); + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + _ = try lookup.typeToIndex(field_ty, switch (kind) { + .forward => unreachable, + .complete, .parameter => .complete, + .global => .global, + }); } _ = try lookup.typeToIndex(ty, .forward); } @@ -953,9 +952,9 @@ pub const CType = extern union { .forward => .forward, .complete, .parameter, .global => .complete, }); - for (info.param_types, 0..) |param_ty, param_i| { - if (info.paramIsComptime(param_i)) continue; - _ = try lookup.typeToIndex(param_ty, switch (kind) { + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + _ = try lookup.typeToIndex(param_type, switch (kind) { .forward => .forward, .complete, .parameter, .global => unreachable, }); @@ -1118,28 +1117,28 @@ pub const CType = extern union { var c_fields_len: usize = 0; for (0..fields_len) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; c_fields_len += 1; } const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); var c_field_i: usize = 0; for (0..fields_len) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; fields_pl[c_field_i] = .{ .name = try if (ty.isSimpleTuple()) std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else arena.dupeZ(u8, ty.structFieldName(field_i)), - .type = store.set.typeToIndex( - ty.structFieldType(field_i), - target, - switch (kind) { - .forward, .complete, .parameter => .complete, - .global => .global, - }, - ).?, + .type = store.set.typeToIndex(field_ty, target, switch (kind) { + .forward, .complete, .parameter => .complete, + .global => .global, + }).?, .alignas = ty.structFieldAlign(field_i, target), }; c_field_i += 1; @@ -1211,16 +1210,16 @@ pub const CType = extern union { }; var c_params_len: usize = 0; - for (0..info.param_types.len) |param_i| { - if (info.paramIsComptime(param_i)) continue; + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; - for (info.param_types, 0..) 
|param_ty, param_i| { - if (info.paramIsComptime(param_i)) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_ty, target, recurse_kind).?; + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type, target, recurse_kind).?; c_param_i += 1; } @@ -1294,7 +1293,9 @@ pub const CType = extern union { var c_field_i: usize = 0; for (0..ty.structFieldCount()) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; const c_field = &c_fields[c_field_i]; c_field_i += 1; @@ -1344,10 +1345,9 @@ pub const CType = extern union { if (info.param_types.len != data.param_types.len or !self.eqlRecurse(info.return_type, data.return_type, recurse_kind)) return false; - for (info.param_types, data.param_types, 0..) |param_ty, param_cty, param_i| { - if (info.paramIsComptime(param_i)) continue; - if (!self.eqlRecurse(param_ty, param_cty, recurse_kind)) - return false; + for (info.param_types, data.param_types) |param_ty, param_cty| { + if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!self.eqlRecurse(param_ty, param_cty, recurse_kind)) return false; } return true; }, @@ -1389,7 +1389,9 @@ pub const CType = extern union { std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; for (0..ty.structFieldCount()) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; self.updateHasherRecurse( hasher, @@ -1423,9 +1425,9 @@ pub const CType = extern union { }; self.updateHasherRecurse(hasher, info.return_type, recurse_kind); - for (info.param_types, 0..) |param_ty, param_i| { - if (info.paramIsComptime(param_i)) continue; - self.updateHasherRecurse(hasher, param_ty, recurse_kind); + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + self.updateHasherRecurse(hasher, param_type, recurse_kind); } },
-- cgit v1.2.3

From 3eed197c95c21d850d503687f445946e6bd429c5 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Mon, 20 Feb 2023 20:52:26 -0500
Subject: CBE: use stdint.h types instead of `zig_` prefixes

This requires manual defines before C99, since pre-C99 compilers may not
provide stdint.h. Also have update-zig1 leave a copy of lib/zig.h in
stage1/zig.h, which allows lib/zig.h to be updated without needing to
update zig1.wasm. Note that since a git object with the exact same contents
already existed, this completely avoids repo bloat due to zig.h changes.
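As a rough illustration of the rename (an editor's sketch, not part of the
patch): callers of the zig.h helpers now spell the integer types with their
stdint.h names instead of the old zig_u8/zig_i8 aliases. The snippet below
assumes the patched lib/zig.h is on the include path and a file name of
sketch.c, both hypothetical; the zig_addo_u8 signature itself is taken
verbatim from the diff that follows.

    /* sketch.c -- hypothetical standalone caller of the zig.h helpers */
    #include <stdio.h>
    #include "zig.h"

    int main(void) {
        uint8_t res;
        /* Overflow-checked add in a 7-bit integer: 100 + 50 = 150 does not
           fit in a u7, so the result wraps to 150 mod 128 == 22 and the
           helper reports the overflow. */
        bool ov = zig_addo_u8(&res, 100, 50, 7);
        printf("res=%u overflow=%d\n", (unsigned)res, (int)ov); /* res=22 overflow=1 */
        return 0;
    }

Before this commit the same call took zig_u8 operands; afterwards generated
C and hand-written callers share the single set of stdint.h integer types,
with the limits.h-based fallback below supplying those types on pre-C99
compilers that lack stdint.h.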
--- CMakeLists.txt | 2 +- build.zig | 31 + lib/zig.h | 1440 +++++++++++++++------------- src/codegen/c.zig | 69 +- src/codegen/c/type.zig | 119 +-- stage1/zig.h | 2486 ++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 3396 insertions(+), 751 deletions(-) create mode 100644 stage1/zig.h (limited to 'src/codegen') diff --git a/CMakeLists.txt b/CMakeLists.txt index b31a0f596f..3fb011e493 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -783,7 +783,7 @@ set_target_properties(zig2 PROPERTIES COMPILE_FLAGS ${ZIG2_COMPILE_FLAGS} LINK_FLAGS ${ZIG2_LINK_FLAGS} ) -target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/lib") +target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/stage1") target_link_libraries(zig2 LINK_PUBLIC zigcpp) if(MSVC) diff --git a/build.zig b/build.zig index faf14cc405..175beeb422 100644 --- a/build.zig +++ b/build.zig @@ -506,8 +506,39 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void { run_opt.addArg("-o"); run_opt.addFileSourceArg(.{ .path = "stage1/zig1.wasm" }); + const CopyFileStep = struct { + const Step = std.Build.Step; + const FileSource = std.Build.FileSource; + const CopyFileStep = @This(); + + step: Step, + builder: *std.Build, + source: FileSource, + dest_rel_path: []const u8, + + pub fn init(builder: *std.Build, source: FileSource, dest_rel_path: []const u8) CopyFileStep { + return CopyFileStep{ + .builder = builder, + .step = Step.init(.custom, builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), builder.allocator, make), + .source = source.dupe(builder), + .dest_rel_path = builder.dupePath(dest_rel_path), + }; + } + + fn make(step: *Step) !void { + const self = @fieldParentPtr(CopyFileStep, "step", step); + const full_src_path = self.source.getPath(self.builder); + const full_dest_path = self.builder.pathFromRoot(self.dest_rel_path); + try self.builder.updateFile(full_src_path, full_dest_path); + } + }; + + const copy_zig_h = try b.allocator.create(CopyFileStep); + copy_zig_h.* = CopyFileStep.init(b, .{ .path = "lib/zig.h" }, "stage1/zig.h"); + const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm"); update_zig1_step.dependOn(&run_opt.step); + update_zig1_step.dependOn(©_zig_h.step); } fn addCompilerStep( diff --git a/lib/zig.h b/lib/zig.h index 0756d9f731..5929656985 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -1,6 +1,8 @@ #undef linux +#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ #define __STDC_WANT_IEC_60559_TYPES_EXT__ +#endif #include #include #include @@ -297,690 +299,791 @@ typedef char bool; #define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) -typedef uintptr_t zig_usize; -typedef intptr_t zig_isize; -typedef signed short int zig_c_short; -typedef unsigned short int zig_c_ushort; -typedef signed int zig_c_int; -typedef unsigned int zig_c_uint; -typedef signed long int zig_c_long; -typedef unsigned long int zig_c_ulong; -typedef signed long long int zig_c_longlong; -typedef unsigned long long int zig_c_ulonglong; - -typedef uint8_t zig_u8; -typedef int8_t zig_i8; -typedef uint16_t zig_u16; -typedef int16_t zig_i16; -typedef uint32_t zig_u32; -typedef int32_t zig_i32; -typedef uint64_t zig_u64; -typedef int64_t zig_i64; - -#define zig_as_u8(val) UINT8_C(val) -#define zig_as_i8(val) INT8_C(val) -#define zig_as_u16(val) UINT16_C(val) -#define zig_as_i16(val) INT16_C(val) -#define zig_as_u32(val) UINT32_C(val) -#define zig_as_i32(val) INT32_C(val) -#define zig_as_u64(val) UINT64_C(val) -#define zig_as_i64(val) INT64_C(val) - -#define zig_minInt_u8 zig_as_u8(0) -#define 
zig_maxInt_u8 UINT8_MAX +#define zig_compiler_rt_abbrev_uint32_t si +#define zig_compiler_rt_abbrev_int32_t si +#define zig_compiler_rt_abbrev_uint64_t di +#define zig_compiler_rt_abbrev_int64_t di +#define zig_compiler_rt_abbrev_zig_u128 ti +#define zig_compiler_rt_abbrev_zig_i128 ti +#define zig_compiler_rt_abbrev_zig_f16 hf +#define zig_compiler_rt_abbrev_zig_f32 sf +#define zig_compiler_rt_abbrev_zig_f64 df +#define zig_compiler_rt_abbrev_zig_f80 xf +#define zig_compiler_rt_abbrev_zig_f128 tf + +zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t); +zig_extern void *memset (void *, int, size_t); + +/* ===================== 8/16/32/64-bit Integer Support ===================== */ + +#if __STDC_VERSION__ >= 199901L +#include +#else + +#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF +typedef unsigned char uint8_t; +typedef signed char int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF +typedef unsigned short uint8_t; +typedef signed short int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF +typedef unsigned int uint8_t; +typedef signed int int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF +typedef unsigned long uint8_t; +typedef signed long int8_t; +#define INT8_C(c) c##L +#define UINT8_C(c) c##LU +#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF +typedef unsigned long long uint8_t; +typedef signed long long int8_t; +#define INT8_C(c) c##LL +#define UINT8_C(c) c##LLU +#endif +#define INT8_MIN (~INT8_C(0x7F)) +#define INT8_MAX ( INT8_C(0x7F)) +#define UINT8_MAX ( INT8_C(0xFF)) + +#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF +typedef unsigned char uint16_t; +typedef signed char int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF +typedef unsigned short uint16_t; +typedef signed short int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF +typedef unsigned int uint16_t; +typedef signed int int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF +typedef unsigned long uint16_t; +typedef signed long int16_t; +#define INT16_C(c) c##L +#define UINT16_C(c) c##LU +#elif LLONG_MIN == ~0x7FFF && LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF +typedef unsigned long long uint16_t; +typedef signed long long int16_t; +#define INT16_C(c) c##LL +#define UINT16_C(c) c##LLU +#endif +#define INT16_MIN (~INT16_C(0x7FFF)) +#define INT16_MAX ( INT16_C(0x7FFF)) +#define UINT16_MAX ( INT16_C(0xFFFF)) + +#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF +typedef unsigned char uint32_t; +typedef signed char int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF +typedef unsigned short uint32_t; +typedef signed short int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF +typedef unsigned int uint32_t; +typedef signed int int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF +typedef unsigned long uint32_t; +typedef signed long 
int32_t; +#define INT32_C(c) c##L +#define UINT32_C(c) c##LU +#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF +typedef unsigned long long uint32_t; +typedef signed long long int32_t; +#define INT32_C(c) c##LL +#define UINT32_C(c) c##LLU +#endif +#define INT32_MIN (~INT32_C(0x7FFFFFFF)) +#define INT32_MAX ( INT32_C(0x7FFFFFFF)) +#define UINT32_MAX ( INT32_C(0xFFFFFFFF)) + +#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned char uint64_t; +typedef signed char int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned short uint64_t; +typedef signed short int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned int uint64_t; +typedef signed int int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned long uint64_t; +typedef signed long int64_t; +#define INT64_C(c) c##L +#define UINT64_C(c) c##LU +#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned long long uint64_t; +typedef signed long long int64_t; +#define INT64_C(c) c##LL +#define UINT64_C(c) c##LLU +#endif +#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF)) +#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF)) +#define UINT64_MAX ( INT64_C(0xFFFFFFFFFFFFFFFF)) + +typedef size_t uintptr_t; +typedef ptrdiff_t intptr_t; + +#endif + #define zig_minInt_i8 INT8_MIN #define zig_maxInt_i8 INT8_MAX -#define zig_minInt_u16 zig_as_u16(0) -#define zig_maxInt_u16 UINT16_MAX +#define zig_minInt_u8 UINT8_C(0) +#define zig_maxInt_u8 UINT8_MAX #define zig_minInt_i16 INT16_MIN #define zig_maxInt_i16 INT16_MAX -#define zig_minInt_u32 zig_as_u32(0) -#define zig_maxInt_u32 UINT32_MAX +#define zig_minInt_u16 UINT16_C(0) +#define zig_maxInt_u16 UINT16_MAX #define zig_minInt_i32 INT32_MIN #define zig_maxInt_i32 INT32_MAX -#define zig_minInt_u64 zig_as_u64(0) -#define zig_maxInt_u64 UINT64_MAX +#define zig_minInt_u32 UINT32_C(0) +#define zig_maxInt_u32 UINT32_MAX #define zig_minInt_i64 INT64_MIN #define zig_maxInt_i64 INT64_MAX +#define zig_minInt_u64 UINT64_C(0) +#define zig_maxInt_u64 UINT64_MAX -#define zig_compiler_rt_abbrev_u32 si -#define zig_compiler_rt_abbrev_i32 si -#define zig_compiler_rt_abbrev_u64 di -#define zig_compiler_rt_abbrev_i64 di -#define zig_compiler_rt_abbrev_u128 ti -#define zig_compiler_rt_abbrev_i128 ti -#define zig_compiler_rt_abbrev_f16 hf -#define zig_compiler_rt_abbrev_f32 sf -#define zig_compiler_rt_abbrev_f64 df -#define zig_compiler_rt_abbrev_f80 xf -#define zig_compiler_rt_abbrev_f128 tf - -zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize); -zig_extern void *memset (void *, int, zig_usize); - -/* ==================== 8/16/32/64-bit Integer Routines ===================== */ - -#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits)) -#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits) -#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits) -#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits) +#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, 
w - (bits)) +#define zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits) +#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits) +#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits) +#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits) #define zig_int_operator(Type, RhsType, operation, operator) \ - static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \ + static inline Type zig_##operation(Type lhs, RhsType rhs) { \ return lhs operator rhs; \ } #define zig_int_basic_operator(Type, operation, operator) \ - zig_int_operator(Type, Type, operation, operator) + zig_int_operator(Type, Type, operation, operator) #define zig_int_shift_operator(Type, operation, operator) \ - zig_int_operator(Type, u8, operation, operator) + zig_int_operator(Type, uint8_t, operation, operator) #define zig_int_helpers(w) \ - zig_int_basic_operator(u##w, and, &) \ - zig_int_basic_operator(i##w, and, &) \ - zig_int_basic_operator(u##w, or, |) \ - zig_int_basic_operator(i##w, or, |) \ - zig_int_basic_operator(u##w, xor, ^) \ - zig_int_basic_operator(i##w, xor, ^) \ - zig_int_shift_operator(u##w, shl, <<) \ - zig_int_shift_operator(i##w, shl, <<) \ - zig_int_shift_operator(u##w, shr, >>) \ + zig_int_basic_operator(uint##w##_t, and_u##w, &) \ + zig_int_basic_operator( int##w##_t, and_i##w, &) \ + zig_int_basic_operator(uint##w##_t, or_u##w, |) \ + zig_int_basic_operator( int##w##_t, or_i##w, |) \ + zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \ + zig_int_basic_operator( int##w##_t, xor_i##w, ^) \ + zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \ + zig_int_shift_operator( int##w##_t, shl_i##w, <<) \ + zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \ \ - static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \ - zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \ + static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \ + int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \ } \ \ - static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \ - return val ^ zig_maxInt(u##w, bits); \ + static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \ + return val ^ zig_maxInt_u(w, bits); \ } \ \ - static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \ + static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \ (void)bits; \ return ~val; \ } \ \ - static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \ - return val & zig_maxInt(u##w, bits); \ + static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \ + return val & zig_maxInt_u(w, bits); \ } \ \ - static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \ - return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \ - ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \ + static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \ + return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \ + ? 
val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \ } \ \ - zig_int_basic_operator(u##w, div_floor, /) \ + zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \ \ - static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \ - return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \ + static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \ + return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \ } \ \ - zig_int_basic_operator(u##w, mod, %) \ + zig_int_basic_operator(uint##w##_t, mod_u##w, %) \ \ - static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \ - zig_i##w rem = lhs % rhs; \ - return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \ + static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \ + int##w##_t rem = lhs % rhs; \ + return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \ } \ \ - static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \ } \ \ - static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, (uint##w##_t)rhs), bits); \ } \ \ - static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ return zig_wrap_u##w(lhs + rhs, bits); \ } \ \ - static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \ } \ \ - static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ return zig_wrap_u##w(lhs - rhs, bits); \ } \ \ - static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \ } \ \ - static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ return zig_wrap_u##w(lhs * rhs, bits); \ } \ \ - static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \ } zig_int_helpers(8) zig_int_helpers(16) zig_int_helpers(32) zig_int_helpers(64) -static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u32 
full_res; + uint32_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); - return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); #else *res = zig_addw_u32(lhs, rhs, bits); return *res < lhs; #endif } -static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n, - const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +static inline void zig_vaddo_u32(uint8_t *ov, uint32_t *res, int n, + const uint32_t *lhs, const uint32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i32 full_res; + int32_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int); + int overflow_int; + int32_t full_res = __addosi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); - return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); } -static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n, - const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +static inline void zig_vaddo_i32(uint8_t *ov, int32_t *res, int n, + const int32_t *lhs, const int32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u64 full_res; + uint64_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); - return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); #else *res = zig_addw_u64(lhs, rhs, bits); return *res < lhs; #endif } -static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n, - const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +static inline void zig_vaddo_u64(uint8_t *ov, uint64_t *res, int n, + const uint64_t *lhs, const uint64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i64 full_res; + int64_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int); + int overflow_int; + int64_t full_res = __addodi4(lhs, 
rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); - return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); } -static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n, - const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +static inline void zig_vaddo_i64(uint8_t *ov, int64_t *res, int n, + const int64_t *lhs, const int64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u8 full_res; + uint8_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); - return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u8)full_res; + *res = (uint8_t)full_res; return overflow; #endif } -static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n, - const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +static inline void zig_vaddo_u8(uint8_t *ov, uint8_t *res, int n, + const uint8_t *lhs, const uint8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i8 full_res; + int8_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); - return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i8)full_res; + *res = (int8_t)full_res; return overflow; #endif } -static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n, - const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +static inline void zig_vaddo_i8(uint8_t *ov, int8_t *res, int n, + const int8_t *lhs, const int8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u16 full_res; + uint16_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); - return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u16)full_res; + *res = (uint16_t)full_res; return overflow; #endif } -static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n, - const zig_u16 *lhs, const 
zig_u16 *rhs, zig_u8 bits) +static inline void zig_vaddo_u16(uint8_t *ov, uint16_t *res, int n, + const uint16_t *lhs, const uint16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i16 full_res; + int16_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); - return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i16)full_res; + *res = (int16_t)full_res; return overflow; #endif } -static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n, - const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +static inline void zig_vaddo_i16(uint8_t *ov, int16_t *res, int n, + const int16_t *lhs, const int16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u32 full_res; + uint32_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); - return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); #else *res = zig_subw_u32(lhs, rhs, bits); return *res > lhs; #endif } -static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n, - const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +static inline void zig_vsubo_u32(uint8_t *ov, uint32_t *res, int n, + const uint32_t *lhs, const uint32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i32 full_res; + int32_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int); + int overflow_int; + int32_t full_res = __subosi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); - return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); } -static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n, - const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +static inline void zig_vsubo_i32(uint8_t *ov, int32_t *res, int n, + const int32_t *lhs, const int32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline bool 
zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u64 full_res; + uint64_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); - return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); #else *res = zig_subw_u64(lhs, rhs, bits); return *res > lhs; #endif } -static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n, - const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +static inline void zig_vsubo_u64(uint8_t *ov, uint64_t *res, int n, + const uint64_t *lhs, const uint64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i64 full_res; + int64_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int); + int overflow_int; + int64_t full_res = __subodi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); - return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); } -static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n, - const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +static inline void zig_vsubo_i64(uint8_t *ov, int64_t *res, int n, + const int64_t *lhs, const int64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u8 full_res; + uint8_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); - return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u8)full_res; + *res = (uint8_t)full_res; return overflow; #endif } -static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n, - const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +static inline void zig_vsubo_u8(uint8_t *ov, uint8_t *res, int n, + const uint8_t *lhs, const uint8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i8 full_res; + int8_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, 
&full_res); *res = zig_wrap_i8(full_res, bits); - return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i8)full_res; + *res = (int8_t)full_res; return overflow; #endif } -static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n, - const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +static inline void zig_vsubo_i8(uint8_t *ov, int8_t *res, int n, + const int8_t *lhs, const int8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u16 full_res; + uint16_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); - return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u16)full_res; + *res = (uint16_t)full_res; return overflow; #endif } -static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n, - const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +static inline void zig_vsubo_u16(uint8_t *ov, uint16_t *res, int n, + const uint16_t *lhs, const uint16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i16 full_res; + int16_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); - return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i16)full_res; + *res = (int16_t)full_res; return overflow; #endif } -static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n, - const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +static inline void zig_vsubo_i16(uint8_t *ov, int16_t *res, int n, + const int16_t *lhs, const int16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u32 full_res; + uint32_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); - return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); #else *res = zig_mulw_u32(lhs, rhs, bits); - return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs; + 
return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs; #endif } -static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n, - const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +static inline void zig_vmulo_u32(uint8_t *ov, uint32_t *res, int n, + const uint32_t *lhs, const uint32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i32 full_res; + int32_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int); + int overflow_int; + int32_t full_res = __mulosi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); - return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); } -static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n, - const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +static inline void zig_vmulo_i32(uint8_t *ov, int32_t *res, int n, + const int32_t *lhs, const int32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u64 full_res; + uint64_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); - return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); #else *res = zig_mulw_u64(lhs, rhs, bits); - return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs; + return rhs != UINT64_C(0) && lhs > zig_maxInt_u(64, bits) / rhs; #endif } -static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n, - const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +static inline void zig_vmulo_u64(uint8_t *ov, uint64_t *res, int n, + const uint64_t *lhs, const uint64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i64 full_res; + int64_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int); + int overflow_int; + int64_t full_res = __mulodi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); - return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); + 
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); } -static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n, - const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +static inline void zig_vmulo_i64(uint8_t *ov, int64_t *res, int n, + const int64_t *lhs, const int64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u8 full_res; + uint8_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); - return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u8)full_res; + *res = (uint8_t)full_res; return overflow; #endif } -static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n, - const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +static inline void zig_vmulo_u8(uint8_t *ov, uint8_t *res, int n, + const uint8_t *lhs, const uint8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i8 full_res; + int8_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); - return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i8)full_res; + *res = (int8_t)full_res; return overflow; #endif } -static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n, - const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +static inline void zig_vmulo_i8(uint8_t *ov, int8_t *res, int n, + const int8_t *lhs, const int8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u16 full_res; + uint16_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); - return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u16)full_res; + *res = (uint16_t)full_res; return overflow; #endif } -static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n, - const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +static inline void zig_vmulo_u16(uint8_t *ov, uint16_t *res, int n, + const uint16_t *lhs, const uint16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = 
zig_mulo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i16 full_res; + int16_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); - return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i16)full_res; + *res = (int16_t)full_res; return overflow; #endif } -static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n, - const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n, + const int16_t *lhs, const int16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits); } #define zig_int_builtins(w) \ - static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ *res = zig_shlw_u##w(lhs, rhs, bits); \ - return lhs > zig_maxInt(u##w, bits) >> rhs; \ + return lhs > zig_maxInt_u(w, bits) >> rhs; \ } \ \ - static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \ *res = zig_shlw_i##w(lhs, rhs, bits); \ - zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \ - return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \ + int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \ + return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \ } \ \ - static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \ - return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \ + return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ - if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ - return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ + if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ + return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ - static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_addo_u##w(&res, lhs, rhs, bits) ? 
zig_maxInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ + static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \ - return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ - static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \ + static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ + static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \ - return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ - static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ + static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \ - return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + return (lhs ^ rhs) < INT##w##_C(0) ? 
zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } zig_int_builtins(8) zig_int_builtins(16) @@ -988,89 +1091,89 @@ zig_int_builtins(32) zig_int_builtins(64) #define zig_builtin8(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin8; +typedef unsigned int zig_Builtin8; #define zig_builtin16(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin16; +typedef unsigned int zig_Builtin16; #if INT_MIN <= INT32_MIN #define zig_builtin32(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin32; +typedef unsigned int zig_Builtin32; #elif LONG_MIN <= INT32_MIN #define zig_builtin32(name, val) __builtin_##name##l(val) -typedef zig_c_ulong zig_Builtin32; +typedef unsigned long zig_Builtin32; #endif #if INT_MIN <= INT64_MIN #define zig_builtin64(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin64; +typedef unsigned int zig_Builtin64; #elif LONG_MIN <= INT64_MIN #define zig_builtin64(name, val) __builtin_##name##l(val) -typedef zig_c_ulong zig_Builtin64; +typedef unsigned long zig_Builtin64; #elif LLONG_MIN <= INT64_MIN #define zig_builtin64(name, val) __builtin_##name##ll(val) -typedef zig_c_ulonglong zig_Builtin64; +typedef unsigned long long zig_Builtin64; #endif -static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) { +static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) { return zig_wrap_u8(val >> (8 - bits), bits); } -static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) { - return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits); +static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) { + return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits); } -static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) { - zig_u16 full_res; +static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) { + uint16_t full_res; #if zig_has_builtin(bswap16) || defined(zig_gnuc) full_res = __builtin_bswap16(val); #else - full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 | - (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0; + full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 | + (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0; #endif return zig_wrap_u16(full_res >> (16 - bits), bits); } -static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) { - return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits); +static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) { + return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits); } -static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) { - zig_u32 full_res; +static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) { + uint32_t full_res; #if zig_has_builtin(bswap32) || defined(zig_gnuc) full_res = __builtin_bswap32(val); #else - full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 | - (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0; + full_res = (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 | + (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0; #endif return zig_wrap_u32(full_res >> (32 - bits), bits); } -static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) { - return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits); +static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) { + return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits); } -static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) { - zig_u64 
full_res; +static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) { + uint64_t full_res; #if zig_has_builtin(bswap64) || defined(zig_gnuc) full_res = __builtin_bswap64(val); #else - full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 | - (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0; + full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 | + (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0; #endif return zig_wrap_u64(full_res >> (64 - bits), bits); } -static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) { - return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits); +static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) { + return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits); } -static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { - zig_u8 full_res; +static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) { + uint8_t full_res; #if zig_has_builtin(bitreverse8) full_res = __builtin_bitreverse8(val); #else - static zig_u8 const lut[0x10] = { + static uint8_t const lut[0x10] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf }; @@ -1079,62 +1182,62 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { return zig_wrap_u8(full_res >> (8 - bits), bits); } -static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) { - return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits); +static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) { + return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits); } -static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) { - zig_u16 full_res; +static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) { + uint16_t full_res; #if zig_has_builtin(bitreverse16) full_res = __builtin_bitreverse16(val); #else - full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 | - (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0; + full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 | + (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0; #endif return zig_wrap_u16(full_res >> (16 - bits), bits); } -static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) { - return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits); +static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) { + return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits); } -static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) { - zig_u32 full_res; +static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) { + uint32_t full_res; #if zig_has_builtin(bitreverse32) full_res = __builtin_bitreverse32(val); #else - full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 | - (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0; + full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 | + (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0; #endif return zig_wrap_u32(full_res >> (32 - bits), bits); } -static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) { - return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits); +static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) { + return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits); } -static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) { - zig_u64 
full_res; +static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) { + uint64_t full_res; #if zig_has_builtin(bitreverse64) full_res = __builtin_bitreverse64(val); #else - full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 | - (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0; + full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 | + (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0; #endif return zig_wrap_u64(full_res >> (64 - bits), bits); } -static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) { - return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits); +static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) { + return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits); } #define zig_builtin_popcount_common(w) \ - static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \ - return zig_popcount_u##w((zig_u##w)val, bits); \ + static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \ + return zig_popcount_u##w((uint##w##_t)val, bits); \ } #if zig_has_builtin(popcount) || defined(zig_gnuc) #define zig_builtin_popcount(w) \ - static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \ (void)bits; \ return zig_builtin##w(popcount, val); \ } \ @@ -1142,12 +1245,12 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) { zig_builtin_popcount_common(w) #else #define zig_builtin_popcount(w) \ - static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \ (void)bits; \ - zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \ - temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \ - temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \ - return temp * (zig_maxInt_u##w / 255) >> (w - 8); \ + uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \ + temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \ + temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \ + return temp * (UINT##w##_MAX / 255) >> (w - 8); \ } \ \ zig_builtin_popcount_common(w) @@ -1158,12 +1261,12 @@ zig_builtin_popcount(32) zig_builtin_popcount(64) #define zig_builtin_ctz_common(w) \ - static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \ - return zig_ctz_u##w((zig_u##w)val, bits); \ + static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \ + return zig_ctz_u##w((uint##w##_t)val, bits); \ } #if zig_has_builtin(ctz) || defined(zig_gnuc) #define zig_builtin_ctz(w) \ - static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \ if (val == 0) return bits; \ return zig_builtin##w(ctz, val); \ } \ @@ -1171,7 +1274,7 @@ zig_builtin_popcount(64) zig_builtin_ctz_common(w) #else #define zig_builtin_ctz(w) \ - static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \ return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \ } \ \ @@ -1183,12 +1286,12 @@ zig_builtin_ctz(32) zig_builtin_ctz(64) #define zig_builtin_clz_common(w) \ - static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \ - return zig_clz_u##w((zig_u##w)val, bits); \ + static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \ + 
return zig_clz_u##w((uint##w##_t)val, bits); \ } #if zig_has_builtin(clz) || defined(zig_gnuc) #define zig_builtin_clz(w) \ - static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \ if (val == 0) return bits; \ return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \ } \ @@ -1196,7 +1299,7 @@ zig_builtin_ctz(64) zig_builtin_clz_common(w) #else #define zig_builtin_clz(w) \ - static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \ return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \ } \ \ @@ -1207,7 +1310,7 @@ zig_builtin_clz(16) zig_builtin_clz(32) zig_builtin_clz(64) -/* ======================== 128-bit Integer Routines ======================== */ +/* ======================== 128-bit Integer Support ========================= */ #if !defined(zig_has_int128) # if defined(__SIZEOF_INT128__) @@ -1222,18 +1325,18 @@ zig_builtin_clz(64) typedef unsigned __int128 zig_u128; typedef signed __int128 zig_i128; -#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) -#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo)) -#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) -#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) -#define zig_hi_u128(val) ((zig_u64)((val) >> 64)) -#define zig_lo_u128(val) ((zig_u64)((val) >> 0)) -#define zig_hi_i128(val) ((zig_i64)((val) >> 64)) -#define zig_lo_i128(val) ((zig_u64)((val) >> 0)) +#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) +#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo)) +#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo) +#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo) +#define zig_hi_u128(val) ((uint64_t)((val) >> 64)) +#define zig_lo_u128(val) ((uint64_t)((val) >> 0)) +#define zig_hi_i128(val) (( int64_t)((val) >> 64)) +#define zig_lo_i128(val) ((uint64_t)((val) >> 0)) #define zig_bitcast_u128(val) ((zig_u128)(val)) #define zig_bitcast_i128(val) ((zig_i128)(val)) #define zig_cmp_int128(Type) \ - static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (lhs > rhs) - (lhs < rhs); \ } #define zig_bit_int128(Type, operation, operator) \ @@ -1244,31 +1347,31 @@ typedef signed __int128 zig_i128; #else /* zig_has_int128 */ #if __LITTLE_ENDIAN__ || _MSC_VER -typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128; -typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128; +typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128; +typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128; #else -typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128; -typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; +typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128; +typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; #endif -#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) -#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) +#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) +#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) #if _MSC_VER -#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } -#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#define zig_make_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } 
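/* An illustrative aside, not part of the original patch: a minimal sketch of
 * why the separate "constant" macros exist, assuming the two-limb zig_u128
 * struct defined above. zig_make_u128 expands to a compound literal,
 * ((zig_u128){ .hi = (hi), .lo = (lo) }), which MSVC refuses to accept as a
 * constant expression in a global initializer (error C2099, as noted later in
 * this patch), while the bare braced form used here by zig_make_constant_u128
 * remains a valid constant initializer. The names g_ok and g_bad are
 * hypothetical, for illustration only:
 *
 *     static zig_u128 g_ok  = zig_make_constant_u128(0, 1); // { .hi = (0), .lo = (1) }
 *     static zig_u128 g_bad = zig_make_u128(0, 1);          // compound literal; MSVC C2099 at file scope
 */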
+#define zig_make_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } #else -#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) -#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) +#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo) +#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo) #endif #define zig_hi_u128(val) ((val).hi) #define zig_lo_u128(val) ((val).lo) #define zig_hi_i128(val) ((val).hi) #define zig_lo_i128(val) ((val).lo) -#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo) -#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo) +#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo) +#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo) #define zig_cmp_int128(Type) \ - static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (lhs.hi == rhs.hi) \ ? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \ : (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \ @@ -1280,10 +1383,10 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; #endif /* zig_has_int128 */ -#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64) -#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64) -#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64) -#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64) +#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64) +#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64) +#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64) +#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64) zig_cmp_int128(u128) zig_cmp_int128(i128) @@ -1297,28 +1400,28 @@ zig_bit_int128(i128, or, |) zig_bit_int128(u128, xor, ^) zig_bit_int128(i128, xor, ^) -static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs); +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs); #if zig_has_int128 -static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { - return val ^ zig_maxInt(u128, bits); +static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { + return val ^ zig_maxInt_u(128, bits); } -static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { +static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) { (void)bits; return ~val; } -static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) { return lhs >> rhs; } -static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { +static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) { return lhs << rhs; } -static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { +static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) { return lhs << rhs; } @@ -1363,40 +1466,40 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { } static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0)); + return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0)); } static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 rem = zig_rem_i128(lhs, rhs); - return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0)); + return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? 
rhs : zig_make_i128(0, 0)); } #else /* zig_has_int128 */ -static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { - return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { + return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) }; } -static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { - return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) { + return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) }; } -static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { - if (rhs == zig_as_u8(0)) return lhs; - if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) }; - return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs }; +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) }; + return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs }; } -static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { - if (rhs == zig_as_u8(0)) return lhs; - if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; - return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 }; + return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs }; } -static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { - if (rhs == zig_as_u8(0)) return lhs; - if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; - return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 }; + return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs }; } static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { @@ -1454,11 +1557,11 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 rem = zig_rem_i128(lhs, rhs); - return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0))); + return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? 
rhs : zig_make_i128(0, 0))); } static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0))); + return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0))); } #endif /* zig_has_int128 */ @@ -1471,161 +1574,161 @@ static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) { } static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; + return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs; } static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; + return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs; } static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; + return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs; } static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; + return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? lhs : rhs; } -static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) { - zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0); +static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) { + zig_i128 sign_mask = zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_sub_i128(zig_make_i128(0, 0), zig_make_i128(0, 1)) : zig_make_i128(0, 0); return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask); } -static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) { - return zig_and_u128(val, zig_maxInt(u128, bits)); +static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) { + return zig_and_u128(val, zig_maxInt_u(128, bits)); } -static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) { - return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val)); +static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) { + return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val)); } -static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) { return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits); } -static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits); } -static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { return zig_wrap_u128(zig_add_u128(lhs, rhs), bits); } -static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } -static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 
rhs, uint8_t bits) { return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits); } -static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } -static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits); } -static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } #if zig_has_int128 -static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) zig_u128 full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); - return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); #else *res = zig_addw_u128(lhs, rhs, bits); return *res < lhs; #endif } -zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) zig_i128 full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; + int overflow_int; zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); - return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); } -static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) zig_u128 full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); - return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); #else *res = zig_subw_u128(lhs, rhs, bits); return *res > lhs; #endif } -zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) zig_i128 full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; + int overflow_int; zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); - return overflow || full_res < 
zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); } -static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) zig_u128 full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); - return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); #else *res = zig_mulw_u128(lhs, rhs, bits); - return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs; + return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs; #endif } -zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) zig_i128 full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; + int overflow_int; zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); - return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); } #else /* zig_has_int128 */ -static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) { +static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, uint8_t bits) { return overflow || - zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) || - zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0); + zig_cmp_u128(full_res, zig_minInt_u(128, bits)) < INT32_C(0) || + zig_cmp_u128(full_res, zig_maxInt_u(128, bits)) > INT32_C(0); } -static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) { +static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, uint8_t bits) { return overflow || - zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) || - zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0); + zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) || + zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0); } -static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 full_res; bool overflow = zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | @@ -1634,15 +1737,15 @@ static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_ return zig_overflow_u128(overflow, full_res, bits); } -zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { - zig_c_int overflow_int; +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int overflow_int; zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); *res = 
zig_wrap_i128(full_res, bits); return zig_overflow_i128(overflow_int, full_res, bits); } -static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 full_res; bool overflow = zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | @@ -1651,23 +1754,23 @@ static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_ return zig_overflow_u128(overflow, full_res, bits); } -zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { - zig_c_int overflow_int; +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int overflow_int; zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); *res = zig_wrap_i128(full_res, bits); return zig_overflow_i128(overflow_int, full_res, bits); } -static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { *res = zig_mulw_u128(lhs, rhs, bits); - return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) && - zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); + return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) && + zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0); } -zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { - zig_c_int overflow_int; +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int overflow_int; zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); *res = zig_wrap_i128(full_res, bits); return zig_overflow_i128(overflow_int, full_res, bits); @@ -1675,119 +1778,119 @@ static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_ #endif /* zig_has_int128 */ -static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) { *res = zig_shlw_u128(lhs, rhs, bits); - return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); + return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0); } -static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) { *res = zig_shlw_i128(lhs, rhs, bits); - zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1))); - return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && - zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); + zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1))); + return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) && + zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0); } -static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - if (zig_cmp_u128(rhs, 
zig_as_u128(0, bits)) >= zig_as_i32(0)) - return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs; + if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0)) + return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs; #if zig_has_int128 - return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res; + return zig_shlo_u128(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(128, bits) : res; #else - return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res; + return zig_shlo_u128(&res, lhs, (uint8_t)rhs.lo, bits) ? zig_maxInt_u(128, bits) : res; #endif } -static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; - if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res; - return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res; + return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; + return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res; } -static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; if (!zig_addo_i128(&res, lhs, rhs, bits)) return res; - return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res; + return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt_u(128, bits) : res; } -static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; if (!zig_subo_i128(&res, lhs, rhs, bits)) return res; - return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; + return zig_mulo_u128(&res, lhs, rhs, bits) ? 
zig_maxInt_u(128, bits) : res; } -static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res; - return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) { - if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits); - if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64)); - return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64)); +static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) { + if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits); + if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64)); + return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64)); } -static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) { +static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) { return zig_clz_u128(zig_bitcast_u128(val), bits); } -static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) { - if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64)); - return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64); +static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) { + if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64)); + return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64); } -static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) { +static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) { return zig_ctz_u128(zig_bitcast_u128(val), bits); } -static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) { - return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + - zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64)); +static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) { + return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) + + zig_popcount_u64(zig_lo_u128(val), UINT8_C(64)); } -static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) { +static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) { return zig_popcount_u128(zig_bitcast_u128(val), bits); } -static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) { +static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) { zig_u128 full_res; #if zig_has_builtin(bswap128) full_res = __builtin_bswap128(val); #else - full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)), - zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64))); + full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)), + zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64))); #endif - return zig_shr_u128(full_res, zig_as_u8(128) - bits); + return zig_shr_u128(full_res, UINT8_C(128) - bits); } -static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) { +static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) { return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits)); } -static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) { - return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)), - 
zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))), - zig_as_u8(128) - bits); +static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) { + return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)), + zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))), + UINT8_C(128) - bits); } -static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { +static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) { return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits)); } @@ -1810,85 +1913,85 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { #if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc) #define zig_has_float_builtins 1 -#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg) -#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg) -#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg) -#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg) -#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg) -#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg) +#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg) +#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32(__builtin_##name, )(arg) +#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg) +#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg) +#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg) +#define zig_make_special_c_longdouble(sign, name, arg, repr) sign zig_make_c_longdouble(__builtin_##name, )(arg) #else #define zig_has_float_builtins 0 -#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) -#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) -#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) -#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) -#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) -#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) +#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) +#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) +#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) +#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) +#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) +#define zig_make_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) #endif #define zig_has_f16 1 #define zig_bitSizeOf_f16 16 #define zig_libc_name_f16(name) __##name##h -#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr) +#define zig_make_special_constant_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr) #if FLT_MANT_DIG == 11 typedef float zig_f16; -#define zig_as_f16(fp, repr) fp##f +#define zig_make_f16(fp, repr) fp##f #elif DBL_MANT_DIG == 11 typedef double zig_f16; -#define zig_as_f16(fp, repr) fp +#define zig_make_f16(fp, repr) 
fp #elif LDBL_MANT_DIG == 11 #define zig_bitSizeOf_c_longdouble 16 typedef long double zig_f16; -#define zig_as_f16(fp, repr) fp##l +#define zig_make_f16(fp, repr) fp##l #elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc)) typedef _Float16 zig_f16; -#define zig_as_f16(fp, repr) fp##f16 +#define zig_make_f16(fp, repr) fp##f16 #elif defined(__SIZEOF_FP16__) typedef __fp16 zig_f16; -#define zig_as_f16(fp, repr) fp##f16 +#define zig_make_f16(fp, repr) fp##f16 #else #undef zig_has_f16 #define zig_has_f16 0 -#define zig_repr_f16 i16 -typedef zig_i16 zig_f16; -#define zig_as_f16(fp, repr) repr -#undef zig_as_special_f16 -#define zig_as_special_f16(sign, name, arg, repr) repr -#undef zig_as_special_constant_f16 -#define zig_as_special_constant_f16(sign, name, arg, repr) repr +#define zig_bitSizeOf_repr_f16 16 +typedef int16_t zig_f16; +#define zig_make_f16(fp, repr) repr +#undef zig_make_special_f16 +#define zig_make_special_f16(sign, name, arg, repr) repr +#undef zig_make_special_constant_f16 +#define zig_make_special_constant_f16(sign, name, arg, repr) repr #endif #define zig_has_f32 1 #define zig_bitSizeOf_f32 32 #define zig_libc_name_f32(name) name##f #if _MSC_VER -#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, ) +#define zig_make_special_constant_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, ) #else -#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr) +#define zig_make_special_constant_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr) #endif #if FLT_MANT_DIG == 24 typedef float zig_f32; -#define zig_as_f32(fp, repr) fp##f +#define zig_make_f32(fp, repr) fp##f #elif DBL_MANT_DIG == 24 typedef double zig_f32; -#define zig_as_f32(fp, repr) fp +#define zig_make_f32(fp, repr) fp #elif LDBL_MANT_DIG == 24 #define zig_bitSizeOf_c_longdouble 32 typedef long double zig_f32; -#define zig_as_f32(fp, repr) fp##l +#define zig_make_f32(fp, repr) fp##l #elif FLT32_MANT_DIG == 24 typedef _Float32 zig_f32; -#define zig_as_f32(fp, repr) fp##f32 +#define zig_make_f32(fp, repr) fp##f32 #else #undef zig_has_f32 #define zig_has_f32 0 -#define zig_repr_f32 i32 -typedef zig_i32 zig_f32; -#define zig_as_f32(fp, repr) repr -#undef zig_as_special_f32 -#define zig_as_special_f32(sign, name, arg, repr) repr -#undef zig_as_special_constant_f32 -#define zig_as_special_constant_f32(sign, name, arg, repr) repr +#define zig_bitSizeOf_repr_f32 32 +typedef int32_t zig_f32; +#define zig_make_f32(fp, repr) repr +#undef zig_make_special_f32 +#define zig_make_special_f32(sign, name, arg, repr) repr +#undef zig_make_special_constant_f32 +#define zig_make_special_constant_f32(sign, name, arg, repr) repr #endif #define zig_has_f64 1 @@ -1898,108 +2001,108 @@ typedef zig_i32 zig_f32; #ifdef ZIG_TARGET_ABI_MSVC #define zig_bitSizeOf_c_longdouble 64 #endif -#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, ) +#define zig_make_special_constant_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, ) #else /* _MSC_VER */ -#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr) +#define zig_make_special_constant_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr) #endif /* _MSC_VER */ #if FLT_MANT_DIG == 53 typedef float zig_f64; -#define zig_as_f64(fp, repr) fp##f +#define zig_make_f64(fp, repr) fp##f #elif DBL_MANT_DIG == 53 typedef double zig_f64; -#define 
zig_as_f64(fp, repr) fp +#define zig_make_f64(fp, repr) fp #elif LDBL_MANT_DIG == 53 #define zig_bitSizeOf_c_longdouble 64 typedef long double zig_f64; -#define zig_as_f64(fp, repr) fp##l +#define zig_make_f64(fp, repr) fp##l #elif FLT64_MANT_DIG == 53 typedef _Float64 zig_f64; -#define zig_as_f64(fp, repr) fp##f64 +#define zig_make_f64(fp, repr) fp##f64 #elif FLT32X_MANT_DIG == 53 typedef _Float32x zig_f64; -#define zig_as_f64(fp, repr) fp##f32x +#define zig_make_f64(fp, repr) fp##f32x #else #undef zig_has_f64 #define zig_has_f64 0 -#define zig_repr_f64 i64 -typedef zig_i64 zig_f64; -#define zig_as_f64(fp, repr) repr -#undef zig_as_special_f64 -#define zig_as_special_f64(sign, name, arg, repr) repr -#undef zig_as_special_constant_f64 -#define zig_as_special_constant_f64(sign, name, arg, repr) repr +#define zig_bitSizeOf_repr_f64 64 +typedef int64_t zig_f64; +#define zig_make_f64(fp, repr) repr +#undef zig_make_special_f64 +#define zig_make_special_f64(sign, name, arg, repr) repr +#undef zig_make_special_constant_f64 +#define zig_make_special_constant_f64(sign, name, arg, repr) repr #endif #define zig_has_f80 1 #define zig_bitSizeOf_f80 80 #define zig_libc_name_f80(name) __##name##x -#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr) +#define zig_make_special_constant_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr) #if FLT_MANT_DIG == 64 typedef float zig_f80; -#define zig_as_f80(fp, repr) fp##f +#define zig_make_f80(fp, repr) fp##f #elif DBL_MANT_DIG == 64 typedef double zig_f80; -#define zig_as_f80(fp, repr) fp +#define zig_make_f80(fp, repr) fp #elif LDBL_MANT_DIG == 64 #define zig_bitSizeOf_c_longdouble 80 typedef long double zig_f80; -#define zig_as_f80(fp, repr) fp##l +#define zig_make_f80(fp, repr) fp##l #elif FLT80_MANT_DIG == 64 typedef _Float80 zig_f80; -#define zig_as_f80(fp, repr) fp##f80 +#define zig_make_f80(fp, repr) fp##f80 #elif FLT64X_MANT_DIG == 64 typedef _Float64x zig_f80; -#define zig_as_f80(fp, repr) fp##f64x +#define zig_make_f80(fp, repr) fp##f64x #elif defined(__SIZEOF_FLOAT80__) typedef __float80 zig_f80; -#define zig_as_f80(fp, repr) fp##l +#define zig_make_f80(fp, repr) fp##l #else #undef zig_has_f80 #define zig_has_f80 0 -#define zig_repr_f80 i128 +#define zig_bitSizeOf_repr_f80 128 typedef zig_i128 zig_f80; -#define zig_as_f80(fp, repr) repr -#undef zig_as_special_f80 -#define zig_as_special_f80(sign, name, arg, repr) repr -#undef zig_as_special_constant_f80 -#define zig_as_special_constant_f80(sign, name, arg, repr) repr +#define zig_make_f80(fp, repr) repr +#undef zig_make_special_f80 +#define zig_make_special_f80(sign, name, arg, repr) repr +#undef zig_make_special_constant_f80 +#define zig_make_special_constant_f80(sign, name, arg, repr) repr #endif #define zig_has_f128 1 #define zig_bitSizeOf_f128 128 #define zig_libc_name_f128(name) name##q -#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr) +#define zig_make_special_constant_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr) #if FLT_MANT_DIG == 113 typedef float zig_f128; -#define zig_as_f128(fp, repr) fp##f +#define zig_make_f128(fp, repr) fp##f #elif DBL_MANT_DIG == 113 typedef double zig_f128; -#define zig_as_f128(fp, repr) fp +#define zig_make_f128(fp, repr) fp #elif LDBL_MANT_DIG == 113 #define zig_bitSizeOf_c_longdouble 128 typedef long double zig_f128; -#define zig_as_f128(fp, repr) fp##l +#define zig_make_f128(fp, repr) fp##l #elif FLT128_MANT_DIG == 
113 typedef _Float128 zig_f128; -#define zig_as_f128(fp, repr) fp##f128 +#define zig_make_f128(fp, repr) fp##f128 #elif FLT64X_MANT_DIG == 113 typedef _Float64x zig_f128; -#define zig_as_f128(fp, repr) fp##f64x +#define zig_make_f128(fp, repr) fp##f64x #elif defined(__SIZEOF_FLOAT128__) typedef __float128 zig_f128; -#define zig_as_f128(fp, repr) fp##q -#undef zig_as_special_f128 -#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) +#define zig_make_f128(fp, repr) fp##q +#undef zig_make_special_f128 +#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) #else #undef zig_has_f128 #define zig_has_f128 0 -#define zig_repr_f128 i128 +#define zig_bitSizeOf_repr_f128 128 typedef zig_i128 zig_f128; -#define zig_as_f128(fp, repr) repr -#undef zig_as_special_f128 -#define zig_as_special_f128(sign, name, arg, repr) repr -#undef zig_as_special_constant_f128 -#define zig_as_special_constant_f128(sign, name, arg, repr) repr +#define zig_make_f128(fp, repr) repr +#undef zig_make_special_f128 +#define zig_make_special_f128(sign, name, arg, repr) repr +#undef zig_make_special_constant_f128 +#define zig_make_special_constant_f128(sign, name, arg, repr) repr #endif #define zig_has_c_longdouble 1 @@ -2010,17 +2113,17 @@ typedef zig_i128 zig_f128; #define zig_libc_name_c_longdouble(name) name##l #endif -#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr) +#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) zig_make_special_c_longdouble(sign, name, arg, repr) #ifdef zig_bitSizeOf_c_longdouble #ifdef ZIG_TARGET_ABI_MSVC typedef double zig_c_longdouble; #undef zig_bitSizeOf_c_longdouble #define zig_bitSizeOf_c_longdouble 64 -#define zig_as_c_longdouble(fp, repr) fp +#define zig_make_c_longdouble(fp, repr) fp #else typedef long double zig_c_longdouble; -#define zig_as_c_longdouble(fp, repr) fp##l +#define zig_make_c_longdouble(fp, repr) fp##l #endif #else /* zig_bitSizeOf_c_longdouble */ @@ -2029,13 +2132,13 @@ typedef long double zig_c_longdouble; #define zig_has_c_longdouble 0 #define zig_bitSizeOf_c_longdouble 80 #define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80 -#define zig_repr_c_longdouble i128 +#define zig_bitSizeOf_repr_c_longdouble 128 typedef zig_i128 zig_c_longdouble; -#define zig_as_c_longdouble(fp, repr) repr -#undef zig_as_special_c_longdouble -#define zig_as_special_c_longdouble(sign, name, arg, repr) repr -#undef zig_as_special_constant_c_longdouble -#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr +#define zig_make_c_longdouble(fp, repr) repr +#undef zig_make_special_c_longdouble +#define zig_make_special_c_longdouble(sign, name, arg, repr) repr +#undef zig_make_special_constant_c_longdouble +#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) repr #endif /* zig_bitSizeOf_c_longdouble */ @@ -2073,32 +2176,35 @@ zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_lo #endif #define zig_convert_builtin(ResType, operation, ArgType, version) \ - zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType); -zig_convert_builtin(f16, trunc, f32, 2) -zig_convert_builtin(f16, trunc, f64, 2) -zig_convert_builtin(f16, trunc, f80, 2) -zig_convert_builtin(f16, trunc, f128, 2) -zig_convert_builtin(f32, extend, f16, 2) -zig_convert_builtin(f32, 
trunc, f64, 2) -zig_convert_builtin(f32, trunc, f80, 2) -zig_convert_builtin(f32, trunc, f128, 2) -zig_convert_builtin(f64, extend, f16, 2) -zig_convert_builtin(f64, extend, f32, 2) -zig_convert_builtin(f64, trunc, f80, 2) -zig_convert_builtin(f64, trunc, f128, 2) -zig_convert_builtin(f80, extend, f16, 2) -zig_convert_builtin(f80, extend, f32, 2) -zig_convert_builtin(f80, extend, f64, 2) -zig_convert_builtin(f80, trunc, f128, 2) -zig_convert_builtin(f128, extend, f16, 2) -zig_convert_builtin(f128, extend, f32, 2) -zig_convert_builtin(f128, extend, f64, 2) -zig_convert_builtin(f128, extend, f80, 2) + zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType); +zig_convert_builtin(zig_f16, trunc, zig_f32, 2) +zig_convert_builtin(zig_f16, trunc, zig_f64, 2) +zig_convert_builtin(zig_f16, trunc, zig_f80, 2) +zig_convert_builtin(zig_f16, trunc, zig_f128, 2) +zig_convert_builtin(zig_f32, extend, zig_f16, 2) +zig_convert_builtin(zig_f32, trunc, zig_f64, 2) +zig_convert_builtin(zig_f32, trunc, zig_f80, 2) +zig_convert_builtin(zig_f32, trunc, zig_f128, 2) +zig_convert_builtin(zig_f64, extend, zig_f16, 2) +zig_convert_builtin(zig_f64, extend, zig_f32, 2) +zig_convert_builtin(zig_f64, trunc, zig_f80, 2) +zig_convert_builtin(zig_f64, trunc, zig_f128, 2) +zig_convert_builtin(zig_f80, extend, zig_f16, 2) +zig_convert_builtin(zig_f80, extend, zig_f32, 2) +zig_convert_builtin(zig_f80, extend, zig_f64, 2) +zig_convert_builtin(zig_f80, trunc, zig_f128, 2) +zig_convert_builtin(zig_f128, extend, zig_f16, 2) +zig_convert_builtin(zig_f128, extend, zig_f32, 2) +zig_convert_builtin(zig_f128, extend, zig_f64, 2) +zig_convert_builtin(zig_f128, extend, zig_f80, 2) #define zig_float_negate_builtin_0(Type) \ static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ - return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \ + return zig_expand_concat(zig_xor_i, zig_bitSizeOf_repr_##Type)( \ + arg, \ + zig_minInt_i(zig_bitSizeOf_repr_##Type, zig_bitSizeOf_##Type) \ + ); \ } #define zig_float_negate_builtin_1(Type) \ static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ @@ -2106,28 +2212,28 @@ zig_convert_builtin(f128, extend, f80, 2) } #define zig_float_less_builtin_0(Type, operation) \ - zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \ - static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ - return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \ + zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \ } #define zig_float_less_builtin_1(Type, operation) \ - static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (!(lhs <= rhs) - (lhs < rhs)); \ } #define zig_float_greater_builtin_0(Type, operation) \ zig_float_less_builtin_0(Type, operation) #define zig_float_greater_builtin_1(Type, operation) \ - static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { 
\ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ return ((lhs > rhs) - !(lhs >= rhs)); \ } #define zig_float_binary_builtin_0(Type, operation, operator) \ zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \ + zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \ static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ - return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \ } #define zig_float_binary_builtin_1(Type, operation, operator) \ static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ @@ -2135,18 +2241,18 @@ zig_convert_builtin(f128, extend, f80, 2) } #define zig_float_builtins(Type) \ - zig_convert_builtin(i32, fix, Type, ) \ - zig_convert_builtin(u32, fixuns, Type, ) \ - zig_convert_builtin(i64, fix, Type, ) \ - zig_convert_builtin(u64, fixuns, Type, ) \ - zig_convert_builtin(i128, fix, Type, ) \ - zig_convert_builtin(u128, fixuns, Type, ) \ - zig_convert_builtin(Type, float, i32, ) \ - zig_convert_builtin(Type, floatun, u32, ) \ - zig_convert_builtin(Type, float, i64, ) \ - zig_convert_builtin(Type, floatun, u64, ) \ - zig_convert_builtin(Type, float, i128, ) \ - zig_convert_builtin(Type, floatun, u128, ) \ + zig_convert_builtin( int32_t, fix, zig_##Type, ) \ + zig_convert_builtin(uint32_t, fixuns, zig_##Type, ) \ + zig_convert_builtin( int64_t, fix, zig_##Type, ) \ + zig_convert_builtin(uint64_t, fixuns, zig_##Type, ) \ + zig_convert_builtin(zig_i128, fix, zig_##Type, ) \ + zig_convert_builtin(zig_u128, fixuns, zig_##Type, ) \ + zig_convert_builtin(zig_##Type, float, int32_t, ) \ + zig_convert_builtin(zig_##Type, floatun, uint32_t, ) \ + zig_convert_builtin(zig_##Type, float, int64_t, ) \ + zig_convert_builtin(zig_##Type, floatun, uint64_t, ) \ + zig_convert_builtin(zig_##Type, float, zig_i128, ) \ + zig_convert_builtin(zig_##Type, floatun, zig_u128, ) \ zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \ zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \ zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \ @@ -2332,17 +2438,17 @@ zig_msvc_flt_atomics(f64, u64, 64) #if _M_IX86 static inline void zig_msvc_atomic_barrier() { - zig_i32 barrier; + int32_t barrier; __asm { xchg barrier, eax } } -static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) { +static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, uint32_t* arg) { return _InterlockedExchangePointer(obj, arg); } -static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) { +static inline void zig_msvc_atomic_store_p32(void** obj, uint32_t* arg) { _InterlockedExchangePointer(obj, arg); } @@ -2360,11 +2466,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir return exchanged; } #else /* _M_IX86 */ -static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) { +static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, uint64_t* arg) { return _InterlockedExchangePointer(obj, arg); } -static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) { +static inline void zig_msvc_atomic_store_p64(void** obj, uint64_t* arg) { _InterlockedExchangePointer(obj, arg); } @@ -2383,11 +2489,11 @@ static inline bool 
zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desir } static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) { - return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected); + return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (int64_t*)expected); } static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { - return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected); + return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (uint64_t*)expected); } #define zig_msvc_atomics_128xchg(Type) \ @@ -2429,7 +2535,7 @@ zig_msvc_atomics_128op(u128, max) #endif /* _MSC_VER && (_M_IX86 || _M_X64) */ -/* ========================= Special Case Intrinsics ========================= */ +/* ======================== Special Case Intrinsics ========================= */ #if (_MSC_VER && _M_X64) || defined(__x86_64__) @@ -2459,8 +2565,8 @@ static inline void* zig_x86_windows_teb(void) { #if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__) -static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) { - zig_u32 cpu_info[4]; +static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) { + uint32_t cpu_info[4]; #if _MSC_VER __cpuidex(cpu_info, leaf_id, subid); #else @@ -2472,12 +2578,12 @@ static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, z *edx = cpu_info[3]; } -static inline zig_u32 zig_x86_get_xcr0(void) { +static inline uint32_t zig_x86_get_xcr0(void) { #if _MSC_VER - return (zig_u32)_xgetbv(0); + return (uint32_t)_xgetbv(0); #else - zig_u32 eax; - zig_u32 edx; + uint32_t eax; + uint32_t edx; __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); return eax; #endif diff --git a/src/codegen/c.zig b/src/codegen/c.zig index aa540d6984..bed3a37a5c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -752,7 +752,7 @@ pub const DeclGen = struct { try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeAll(" zig_as_"); + try writer.writeAll(" zig_make_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { @@ -962,7 +962,7 @@ pub const DeclGen = struct { try writer.writeByte(' '); var empty = true; if (std.math.isFinite(f128_val)) { - try writer.writeAll("zig_as_"); + try writer.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { @@ -997,7 +997,7 @@ pub const DeclGen = struct { // return dg.fail("Only quiet nans are supported in global variable initializers", .{}); } - try writer.writeAll("zig_as_special_"); + try writer.writeAll("zig_make_special_"); if (location == .StaticInitializer) try writer.writeAll("constant_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); @@ -2016,14 +2016,16 @@ pub const DeclGen = struct { .bool, .size_t, .ptrdiff_t, - .zig_u8, - .zig_i8, - .zig_u16, - .zig_i16, - .zig_u32, - .zig_i32, - .zig_u64, - .zig_i64, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, .zig_u128, .zig_i128, .zig_f16, @@ -2158,14 +2160,16 @@ pub const DeclGen = struct { .bool, .size_t, .ptrdiff_t, - .zig_u8, - .zig_i8, - .zig_u16, - 
.zig_i16, - .zig_u32, - .zig_i32, - .zig_u64, - .zig_i64, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, .zig_u128, .zig_i128, .zig_f16, @@ -2285,16 +2289,16 @@ pub const DeclGen = struct { /// Renders a cast to an int type, from either an int or a pointer. /// /// Some platforms don't have 128 bit integers, so we need to use - /// the zig_as_ and zig_lo_ macros in those cases. + /// the zig_make_ and zig_lo_ macros in those cases. /// /// | Dest type bits | Src type | Result /// |------------------|------------------|---------------------------| /// | < 64 bit integer | pointer | (zig_)(zig_size)src /// | < 64 bit integer | < 64 bit integer | (zig_)src /// | < 64 bit integer | > 64 bit integer | zig_lo(src) - /// | > 64 bit integer | pointer | zig_as_(0, (zig_size)src) - /// | > 64 bit integer | < 64 bit integer | zig_as_(0, src) - /// | > 64 bit integer | > 64 bit integer | zig_as_(zig_hi_(src), zig_lo_(src)) + /// | > 64 bit integer | pointer | zig_make_(0, (zig_size)src) + /// | > 64 bit integer | < 64 bit integer | zig_make_(0, src) + /// | > 64 bit integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src)) fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { const target = dg.module.getTarget(); const dest_bits = dest_ty.bitSize(target); @@ -2332,7 +2336,7 @@ pub const DeclGen = struct { try context.writeValue(dg, w, src_ty, .FunctionArgument); try w.writeByte(')'); } else if (dest_bits > 64 and src_bits <= 64) { - try w.writeAll("zig_as_"); + try w.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(w, dest_ty); try w.writeAll("(0, "); // TODO: Should the 0 go through fmtIntLiteral? if (src_is_ptr) { @@ -2344,7 +2348,7 @@ pub const DeclGen = struct { try w.writeByte(')'); } else { assert(!src_is_ptr); - try w.writeAll("zig_as_"); + try w.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(w, dest_ty); try w.writeAll("(zig_hi_"); try dg.renderTypeForBuiltinFnName(w, src_eff_ty); @@ -3858,7 +3862,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; if (cant_cast) { if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); - try writer.writeAll("zig_as_"); + try writer.writeAll("zig_make_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); } else { @@ -7355,7 +7359,7 @@ fn formatIntLiteral( use_twos_comp = true; } else { // TODO: Use fmtIntLiteral for 0? - try writer.print("zig_sub_{c}{d}(zig_as_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits }); + try writer.print("zig_sub_{c}{d}(zig_make_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits }); } } else { try writer.writeByte('-'); @@ -7365,11 +7369,16 @@ fn formatIntLiteral( switch (data.ty.tag()) { .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {}, else => { - if (int_info.bits > 64 and data.location != null and data.location.? == .StaticInitializer) { + if (int_info.bits <= 64) { + try writer.print("{s}INT{d}_C(", .{ switch (int_info.signedness) { + .signed => "", + .unsigned => "U", + }, c_bits }); + } else if (data.location != null and data.location.? 
== .StaticInitializer) { // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers - try writer.print("zig_as_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); + try writer.print("zig_make_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); } else { - try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); + try writer.print("zig_make_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); } }, } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 71132b5a97..601c15abee 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -77,19 +77,24 @@ pub const CType = extern union { @"long double", // C header types - bool, // stdbool.h - size_t, // stddef.h - ptrdiff_t, // stddef.h + // - stdbool.h + bool, + // - stddef.h + size_t, + ptrdiff_t, + // - stdint.h + uint8_t, + int8_t, + uint16_t, + int16_t, + uint32_t, + int32_t, + uint64_t, + int64_t, + uintptr_t, + intptr_t, // zig.h types - zig_u8, - zig_i8, - zig_u16, - zig_i16, - zig_u32, - zig_i32, - zig_u64, - zig_i64, zig_u128, zig_i128, zig_f16, @@ -149,14 +154,16 @@ pub const CType = extern union { .bool, .size_t, .ptrdiff_t, - .zig_u8, - .zig_i8, - .zig_u16, - .zig_i16, - .zig_u32, - .zig_i32, - .zig_u64, - .zig_i64, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, .zig_u128, .zig_i128, .zig_f16, @@ -428,14 +435,16 @@ pub const CType = extern union { .bool, .size_t, .ptrdiff_t, - .zig_u8, - .zig_i8, - .zig_u16, - .zig_i16, - .zig_u32, - .zig_i32, - .zig_u64, - .zig_i64, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, .zig_u128, .zig_i128, .zig_f16, @@ -526,14 +535,16 @@ pub const CType = extern union { .bool, .size_t, .ptrdiff_t, - .zig_u8, - .zig_i8, - .zig_u16, - .zig_i16, - .zig_u32, - .zig_i32, - .zig_u64, - .zig_i64, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, .zig_u128, .zig_i128, .zig_f16, @@ -628,20 +639,20 @@ pub const CType = extern union { return switch (bits) { 0 => .void, 1...8 => switch (signedness) { - .unsigned => .zig_u8, - .signed => .zig_i8, + .unsigned => .uint8_t, + .signed => .int8_t, }, 9...16 => switch (signedness) { - .unsigned => .zig_u16, - .signed => .zig_i16, + .unsigned => .uint16_t, + .signed => .int16_t, }, 17...32 => switch (signedness) { - .unsigned => .zig_u32, - .signed => .zig_i32, + .unsigned => .uint32_t, + .signed => .int32_t, }, 33...64 => switch (signedness) { - .unsigned => .zig_u64, - .signed => .zig_i64, + .unsigned => .uint64_t, + .signed => .int64_t, }, 65...128 => switch (signedness) { .unsigned => .zig_u128, @@ -712,8 +723,8 @@ pub const CType = extern union { if (!ty.isFnOrHasRuntimeBitsIgnoreComptime()) self.init(.void) else if (ty.isAbiInt()) switch (ty.tag()) { - .usize => self.init(.size_t), - .isize => self.init(.ptrdiff_t), + .usize => self.init(.uintptr_t), + .isize => self.init(.intptr_t), .c_short => self.init(.short), .c_ushort => self.init(.@"unsigned short"), .c_int => self.init(.int), @@ -996,14 +1007,16 @@ pub const CType = extern union { .bool, .size_t, .ptrdiff_t, - .zig_u8, - .zig_i8, - .zig_u16, - .zig_i16, - .zig_u32, - .zig_i32, - .zig_u64, - .zig_i64, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, .zig_u128, .zig_i128, 
.zig_f16,
diff --git a/stage1/zig.h b/stage1/zig.h
new file mode 100644
index 0000000000..0756d9f731
--- /dev/null
+++ b/stage1/zig.h
@@ -0,0 +1,2486 @@
+#undef linux
+
+#define __STDC_WANT_IEC_60559_TYPES_EXT__
+#include <float.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#if _MSC_VER
+#include <intrin.h>
+#elif defined(__i386__) || defined(__x86_64__)
+#include <cpuid.h>
+#endif
+
+#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L
+#if __STDC_VERSION__ >= 199901L
+#include <stdbool.h>
+#else
+typedef char bool;
+#define false 0
+#define true 1
+#endif
+#endif
+
+#if defined(__has_builtin)
+#define zig_has_builtin(builtin) __has_builtin(__builtin_##builtin)
+#else
+#define zig_has_builtin(builtin) 0
+#endif
+
+#if defined(__has_attribute)
+#define zig_has_attribute(attribute) __has_attribute(attribute)
+#else
+#define zig_has_attribute(attribute) 0
+#endif
+
+#if __STDC_VERSION__ >= 201112L
+#define zig_threadlocal _Thread_local
+#elif defined(__GNUC__)
+#define zig_threadlocal __thread
+#elif _MSC_VER
+#define zig_threadlocal __declspec(thread)
+#else
+#define zig_threadlocal zig_threadlocal_unavailable
+#endif
+
+#if defined(__clang__)
+#define zig_clang
+#elif defined(__GNUC__)
+#define zig_gnuc
+#endif
+
+#if _MSC_VER
+#define zig_const_arr
+#define zig_callconv(c) __##c
+#else
+#define zig_const_arr static const
+#define zig_callconv(c) __attribute__((c))
+#endif
+
+#if zig_has_attribute(naked) || defined(zig_gnuc)
+#define zig_naked_decl __attribute__((naked))
+#define zig_naked __attribute__((naked))
+#elif defined(_MSC_VER)
+#define zig_naked_decl
+#define zig_naked __declspec(naked)
+#else
+#define zig_naked_decl zig_naked_unavailable
+#define zig_naked zig_naked_unavailable
+#endif
+
+#if zig_has_attribute(cold)
+#define zig_cold __attribute__((cold))
+#else
+#define zig_cold
+#endif
+
+#if __STDC_VERSION__ >= 199901L
+#define zig_restrict restrict
+#elif defined(__GNUC__)
+#define zig_restrict __restrict
+#else
+#define zig_restrict
+#endif
+
+#if __STDC_VERSION__ >= 201112L
+#define zig_align(alignment) _Alignas(alignment)
+#elif zig_has_attribute(aligned)
+#define zig_align(alignment) __attribute__((aligned(alignment)))
+#elif _MSC_VER
+#define zig_align(alignment) __declspec(align(alignment))
+#else
+#define zig_align zig_align_unavailable
+#endif
+
+#if zig_has_attribute(aligned)
+#define zig_under_align(alignment) __attribute__((aligned(alignment)))
+#elif _MSC_VER
+#define zig_under_align(alignment) zig_align(alignment)
+#else
+#define zig_under_align zig_align_unavailable
+#endif
+
+#if zig_has_attribute(aligned)
+#define zig_align_fn(alignment) __attribute__((aligned(alignment)))
+#elif _MSC_VER
+#define zig_align_fn(alignment)
+#else
+#define zig_align_fn zig_align_fn_unavailable
+#endif
+
+#if zig_has_attribute(packed)
+#define zig_packed(definition) __attribute__((packed)) definition
+#elif _MSC_VER
+#define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack())
+#else
+#define zig_packed(definition) zig_packed_unavailable
+#endif
+
+#if zig_has_attribute(section)
+#define zig_linksection(name, def, ...) def __attribute__((section(name)))
+#elif _MSC_VER
+#define zig_linksection(name, def, ...) __pragma(section(name, __VA_ARGS__)) __declspec(allocate(name)) def
+#else
+#define zig_linksection(name, def, ...) zig_linksection_unavailable
+#endif
+
+#if zig_has_builtin(unreachable) || defined(zig_gnuc)
+#define zig_unreachable() __builtin_unreachable()
+#else
+#define zig_unreachable()
+#endif
+
+#if defined(__cplusplus)
+#define zig_extern extern "C"
+#else
+#define zig_extern extern
+#endif
+
+#if zig_has_attribute(alias)
+#define zig_export(sig, symbol, name) zig_extern sig __attribute__((alias(symbol)))
+#elif _MSC_VER
+#if _M_X64
+#define zig_export(sig, symbol, name) sig;\
+ __pragma(comment(linker, "/alternatename:" name "=" symbol ))
+#else /*_M_X64 */
+#define zig_export(sig, symbol, name) sig;\
+ __pragma(comment(linker, "/alternatename:_" name "=_" symbol ))
+#endif /*_M_X64 */
+#else
+#define zig_export(sig, symbol, name) __asm(name " = " symbol)
+#endif
+
+#if zig_has_builtin(debugtrap)
+#define zig_breakpoint() __builtin_debugtrap()
+#elif zig_has_builtin(trap) || defined(zig_gnuc)
+#define zig_breakpoint() __builtin_trap()
+#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
+#define zig_breakpoint() __debugbreak()
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_breakpoint() __asm__ volatile("int $0x03");
+#else
+#define zig_breakpoint() raise(SIGTRAP)
+#endif
+
+#if zig_has_builtin(return_address) || defined(zig_gnuc)
+#define zig_return_address() __builtin_extract_return_addr(__builtin_return_address(0))
+#elif defined(_MSC_VER)
+#define zig_return_address() _ReturnAddress()
+#else
+#define zig_return_address() 0
+#endif
+
+#if zig_has_builtin(frame_address) || defined(zig_gnuc)
+#define zig_frame_address() __builtin_frame_address(0)
+#else
+#define zig_frame_address() 0
+#endif
+
+#if zig_has_builtin(prefetch) || defined(zig_gnuc)
+#define zig_prefetch(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
+#else
+#define zig_prefetch(addr, rw, locality)
+#endif
+
+#if zig_has_builtin(memory_size) && zig_has_builtin(memory_grow)
+#define zig_wasm_memory_size(index) __builtin_wasm_memory_size(index)
+#define zig_wasm_memory_grow(index, delta) __builtin_wasm_memory_grow(index, delta)
+#else
+#define zig_wasm_memory_size(index) zig_unimplemented()
+#define zig_wasm_memory_grow(index, delta) zig_unimplemented()
+#endif
+
+#define zig_concat(lhs, rhs) lhs##rhs
+#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs)
+
+#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#define zig_atomic(type) _Atomic(type)
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail)
+#define zig_atomicrmw_xchg(obj, arg, order, type) atomic_exchange_explicit (obj, arg, order)
+#define zig_atomicrmw_add(obj, arg, order, type) atomic_fetch_add_explicit (obj, arg, order)
+#define zig_atomicrmw_sub(obj, arg, order, type) atomic_fetch_sub_explicit (obj, arg, order)
+#define zig_atomicrmw_or(obj, arg, order, type) atomic_fetch_or_explicit (obj, arg, order)
+#define zig_atomicrmw_xor(obj, arg, order, type) atomic_fetch_xor_explicit (obj, arg, order)
+#define zig_atomicrmw_and(obj, arg, order, type) atomic_fetch_and_explicit (obj, arg, order)
+#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand (obj, arg, order)
+#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order)
+#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order)
+#define 
zig_atomic_store(obj, arg, order, type) atomic_store_explicit (obj, arg, order) +#define zig_atomic_load(obj, order, type) atomic_load_explicit (obj, order) +#define zig_fence(order) atomic_thread_fence(order) +#elif defined(__GNUC__) +#define memory_order_relaxed __ATOMIC_RELAXED +#define memory_order_consume __ATOMIC_CONSUME +#define memory_order_acquire __ATOMIC_ACQUIRE +#define memory_order_release __ATOMIC_RELEASE +#define memory_order_acq_rel __ATOMIC_ACQ_REL +#define memory_order_seq_cst __ATOMIC_SEQ_CST +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order, type) __atomic_exchange_n(obj, arg, order) +#define zig_atomicrmw_add(obj, arg, order, type) __atomic_fetch_add (obj, arg, order) +#define zig_atomicrmw_sub(obj, arg, order, type) __atomic_fetch_sub (obj, arg, order) +#define zig_atomicrmw_or(obj, arg, order, type) __atomic_fetch_or (obj, arg, order) +#define zig_atomicrmw_xor(obj, arg, order, type) __atomic_fetch_xor (obj, arg, order) +#define zig_atomicrmw_and(obj, arg, order, type) __atomic_fetch_and (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store(obj, arg, order, type) __atomic_store_n (obj, arg, order) +#define zig_atomic_load(obj, order, type) __atomic_load_n (obj, order) +#define zig_fence(order) __atomic_thread_fence(order) +#elif _MSC_VER && (_M_IX86 || _M_X64) +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_expand_concat(zig_msvc_cmpxchg_, type)(obj, &(expected), desired) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) +#define zig_atomicrmw_xchg(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xchg_, type)(obj, arg) +#define zig_atomicrmw_add(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_add_, type)(obj, arg) +#define zig_atomicrmw_sub(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_sub_, type)(obj, arg) +#define zig_atomicrmw_or(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_or_, type)(obj, arg) +#define zig_atomicrmw_xor(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xor_, type)(obj, arg) +#define zig_atomicrmw_and(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_and_, type)(obj, arg) +#define zig_atomicrmw_nand(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_nand_, type)(obj, arg) +#define zig_atomicrmw_min(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_min_, type)(obj, arg) +#define zig_atomicrmw_max(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_max_, type)(obj, arg) +#define zig_atomic_store(obj, arg, order, type) zig_expand_concat(zig_msvc_atomic_store_, type)(obj, arg) +#define zig_atomic_load(obj, order, type) zig_expand_concat(zig_msvc_atomic_load_, type)(obj) +#if 
_M_X64 +#define zig_fence(order) __faststorefence() +#else +#define zig_fence(order) zig_msvc_atomic_barrier() +#endif + +// TODO: _MSC_VER && (_M_ARM || _M_ARM64) +#else +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_unimplemented() +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_unimplemented() +#define zig_atomicrmw_xchg(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_add(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_sub(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_or(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_xor(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_and(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_nand(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_min(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_max(obj, arg, order, type) zig_unimplemented() +#define zig_atomic_store(obj, arg, order, type) zig_unimplemented() +#define zig_atomic_load(obj, order, type) zig_unimplemented() +#define zig_fence(order) zig_unimplemented() +#endif + +#if __STDC_VERSION__ >= 201112L +#define zig_noreturn _Noreturn void +#elif zig_has_attribute(noreturn) || defined(zig_gnuc) +#define zig_noreturn __attribute__((noreturn)) void +#elif _MSC_VER +#define zig_noreturn __declspec(noreturn) void +#else +#define zig_noreturn void +#endif + +#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) + +typedef uintptr_t zig_usize; +typedef intptr_t zig_isize; +typedef signed short int zig_c_short; +typedef unsigned short int zig_c_ushort; +typedef signed int zig_c_int; +typedef unsigned int zig_c_uint; +typedef signed long int zig_c_long; +typedef unsigned long int zig_c_ulong; +typedef signed long long int zig_c_longlong; +typedef unsigned long long int zig_c_ulonglong; + +typedef uint8_t zig_u8; +typedef int8_t zig_i8; +typedef uint16_t zig_u16; +typedef int16_t zig_i16; +typedef uint32_t zig_u32; +typedef int32_t zig_i32; +typedef uint64_t zig_u64; +typedef int64_t zig_i64; + +#define zig_as_u8(val) UINT8_C(val) +#define zig_as_i8(val) INT8_C(val) +#define zig_as_u16(val) UINT16_C(val) +#define zig_as_i16(val) INT16_C(val) +#define zig_as_u32(val) UINT32_C(val) +#define zig_as_i32(val) INT32_C(val) +#define zig_as_u64(val) UINT64_C(val) +#define zig_as_i64(val) INT64_C(val) + +#define zig_minInt_u8 zig_as_u8(0) +#define zig_maxInt_u8 UINT8_MAX +#define zig_minInt_i8 INT8_MIN +#define zig_maxInt_i8 INT8_MAX +#define zig_minInt_u16 zig_as_u16(0) +#define zig_maxInt_u16 UINT16_MAX +#define zig_minInt_i16 INT16_MIN +#define zig_maxInt_i16 INT16_MAX +#define zig_minInt_u32 zig_as_u32(0) +#define zig_maxInt_u32 UINT32_MAX +#define zig_minInt_i32 INT32_MIN +#define zig_maxInt_i32 INT32_MAX +#define zig_minInt_u64 zig_as_u64(0) +#define zig_maxInt_u64 UINT64_MAX +#define zig_minInt_i64 INT64_MIN +#define zig_maxInt_i64 INT64_MAX + +#define zig_compiler_rt_abbrev_u32 si +#define zig_compiler_rt_abbrev_i32 si +#define zig_compiler_rt_abbrev_u64 di +#define zig_compiler_rt_abbrev_i64 di +#define zig_compiler_rt_abbrev_u128 ti +#define zig_compiler_rt_abbrev_i128 ti +#define zig_compiler_rt_abbrev_f16 hf +#define zig_compiler_rt_abbrev_f32 sf +#define zig_compiler_rt_abbrev_f64 df +#define 
zig_compiler_rt_abbrev_f80 xf +#define zig_compiler_rt_abbrev_f128 tf + +zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize); +zig_extern void *memset (void *, int, zig_usize); + +/* ==================== 8/16/32/64-bit Integer Routines ===================== */ + +#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits)) +#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits) +#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits) +#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits) + +#define zig_int_operator(Type, RhsType, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \ + return lhs operator rhs; \ + } +#define zig_int_basic_operator(Type, operation, operator) \ + zig_int_operator(Type, Type, operation, operator) +#define zig_int_shift_operator(Type, operation, operator) \ + zig_int_operator(Type, u8, operation, operator) +#define zig_int_helpers(w) \ + zig_int_basic_operator(u##w, and, &) \ + zig_int_basic_operator(i##w, and, &) \ + zig_int_basic_operator(u##w, or, |) \ + zig_int_basic_operator(i##w, or, |) \ + zig_int_basic_operator(u##w, xor, ^) \ + zig_int_basic_operator(i##w, xor, ^) \ + zig_int_shift_operator(u##w, shl, <<) \ + zig_int_shift_operator(i##w, shl, <<) \ + zig_int_shift_operator(u##w, shr, >>) \ +\ + static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \ + zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \ + return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \ + } \ +\ + static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \ + return val ^ zig_maxInt(u##w, bits); \ + } \ +\ + static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \ + (void)bits; \ + return ~val; \ + } \ +\ + static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \ + return val & zig_maxInt(u##w, bits); \ + } \ +\ + static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \ + return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \ + ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \ + } \ +\ + zig_int_basic_operator(u##w, div_floor, /) \ +\ + static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \ + return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \ + } \ +\ + zig_int_basic_operator(u##w, mod, %) \ +\ + static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \ + zig_i##w rem = lhs % rhs; \ + return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? 
rhs : zig_as_i##w(0)); \ + } \ +\ + static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \ + } \ +\ + static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \ + } \ +\ + static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + return zig_wrap_u##w(lhs + rhs, bits); \ + } \ +\ + static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \ + } \ +\ + static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + return zig_wrap_u##w(lhs - rhs, bits); \ + } \ +\ + static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \ + } \ +\ + static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + return zig_wrap_u##w(lhs * rhs, bits); \ + } \ +\ + static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \ + } +zig_int_helpers(8) +zig_int_helpers(16) +zig_int_helpers(32) +zig_int_helpers(64) + +static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u32 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); +#else + *res = zig_addw_u32(lhs, rhs, bits); + return *res < lhs; +#endif +} + +static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); +static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i32 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); +} + +static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u64 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); +#else + *res = zig_addw_u64(lhs, rhs, bits); + return *res < lhs; +#endif +} + +static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); +static inline bool 
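+/* A minimal usage sketch (illustrative only, not emitted by the backend):
+ * each zig_addo_<ty> helper prefers __builtin_add_overflow when available,
+ * falls back to the compiler_rt symbols (__addosi4/__addodi4) otherwise, and
+ * in both cases wraps the full-width sum to `bits` and reports overflow
+ * against the `bits`-wide range. Lowering an add on a hypothetical 7-bit
+ * unsigned integer might look like:
+ *
+ *   zig_u8 lhs = 100, rhs = 50, sum;
+ *   bool ov = zig_addo_u8(&sum, lhs, rhs, 7); /* sum == 22, ov == true */
+ */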
zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i64 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); +} + +static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u8 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); +#else + zig_u32 full_res; + bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u8)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i8 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); +#else + zig_i32 full_res; + bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i8)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u16 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); +#else + zig_u32 full_res; + bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u16)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i16 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); +#else + zig_i32 full_res; + bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i16)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool 
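+/* The 8- and 16-bit overflow helpers above route their fallback paths through
+ * the 32-bit routines: operands are widened, checked against the requested
+ * `bits` range at 32 bits, then truncated, which is safe because the widened
+ * arithmetic cannot itself wrap for operands this narrow. */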
zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u32 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); +#else + *res = zig_subw_u32(lhs, rhs, bits); + return *res > lhs; +#endif +} + +static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); +static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i32 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); +} + +static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u64 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); +#else + *res = zig_subw_u64(lhs, rhs, bits); + return *res > lhs; +#endif +} + +static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); +static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i64 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); +} + +static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u8 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); +#else + zig_u32 full_res; + bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u8)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits); 
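+    /* As in every zig_v<op>o_<ty> helper: one scalar call per lane, with
+     * res[i] receiving the wrapped result and ov[i] the per-lane overflow
+     * flag. */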
+} + +static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i8 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); +#else + zig_i32 full_res; + bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i8)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits); +} + + +static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u16 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); +#else + zig_u32 full_res; + bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u16)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits); +} + + +static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i16 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); +#else + zig_i32 full_res; + bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i16)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u32 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); +#else + *res = zig_mulw_u32(lhs, rhs, bits); + return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs; +#endif +} + +static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i32 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); +} + +static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 
0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u64 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); +#else + *res = zig_mulw_u64(lhs, rhs, bits); + return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs; +#endif +} + +static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i64 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); +} + +static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u8 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); +#else + zig_u32 full_res; + bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u8)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i8 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); +#else + zig_i32 full_res; + bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i8)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u16 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); +#else + zig_u32 full_res; + bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u16)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, 
const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i16 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); +#else + zig_i32 full_res; + bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i16)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits); +} + +#define zig_int_builtins(w) \ + static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + *res = zig_shlw_u##w(lhs, rhs, bits); \ + return lhs > zig_maxInt(u##w, bits) >> rhs; \ + } \ +\ + static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + *res = zig_shlw_i##w(lhs, rhs, bits); \ + zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \ + return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \ + } \ +\ + static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \ + return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ + return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } \ +\ + static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \ + return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } \ +\ + static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \ + return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } \ +\ + static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \ + return (lhs ^ rhs) < zig_as_i##w(0) ? 
zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } +zig_int_builtins(8) +zig_int_builtins(16) +zig_int_builtins(32) +zig_int_builtins(64) + +#define zig_builtin8(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin8; + +#define zig_builtin16(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin16; + +#if INT_MIN <= INT32_MIN +#define zig_builtin32(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin32; +#elif LONG_MIN <= INT32_MIN +#define zig_builtin32(name, val) __builtin_##name##l(val) +typedef zig_c_ulong zig_Builtin32; +#endif + +#if INT_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin64; +#elif LONG_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name##l(val) +typedef zig_c_ulong zig_Builtin64; +#elif LLONG_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name##ll(val) +typedef zig_c_ulonglong zig_Builtin64; +#endif + +static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) { + return zig_wrap_u8(val >> (8 - bits), bits); +} + +static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) { + return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits); +} + +static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) { + zig_u16 full_res; +#if zig_has_builtin(bswap16) || defined(zig_gnuc) + full_res = __builtin_bswap16(val); +#else + full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 | + (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0; +#endif + return zig_wrap_u16(full_res >> (16 - bits), bits); +} + +static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) { + return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits); +} + +static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) { + zig_u32 full_res; +#if zig_has_builtin(bswap32) || defined(zig_gnuc) + full_res = __builtin_bswap32(val); +#else + full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 | + (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0; +#endif + return zig_wrap_u32(full_res >> (32 - bits), bits); +} + +static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) { + return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits); +} + +static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) { + zig_u64 full_res; +#if zig_has_builtin(bswap64) || defined(zig_gnuc) + full_res = __builtin_bswap64(val); +#else + full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 | + (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0; +#endif + return zig_wrap_u64(full_res >> (64 - bits), bits); +} + +static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) { + return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits); +} + +static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { + zig_u8 full_res; +#if zig_has_builtin(bitreverse8) + full_res = __builtin_bitreverse8(val); +#else + static zig_u8 const lut[0x10] = { + 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, + 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf + }; + full_res = lut[val >> 0 & 0xF] << 4 | lut[val >> 4 & 0xF] << 0; +#endif + return zig_wrap_u8(full_res >> (8 - bits), bits); +} + +static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) { + return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits); +} + +static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) { + zig_u16 full_res; +#if zig_has_builtin(bitreverse16) + full_res = 
__builtin_bitreverse16(val); +#else + full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 | + (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0; +#endif + return zig_wrap_u16(full_res >> (16 - bits), bits); +} + +static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) { + return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits); +} + +static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) { + zig_u32 full_res; +#if zig_has_builtin(bitreverse32) + full_res = __builtin_bitreverse32(val); +#else + full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 | + (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0; +#endif + return zig_wrap_u32(full_res >> (32 - bits), bits); +} + +static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) { + return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits); +} + +static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) { + zig_u64 full_res; +#if zig_has_builtin(bitreverse64) + full_res = __builtin_bitreverse64(val); +#else + full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 | + (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0; +#endif + return zig_wrap_u64(full_res >> (64 - bits), bits); +} + +static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) { + return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits); +} + +#define zig_builtin_popcount_common(w) \ + static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \ + return zig_popcount_u##w((zig_u##w)val, bits); \ + } +#if zig_has_builtin(popcount) || defined(zig_gnuc) +#define zig_builtin_popcount(w) \ + static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + (void)bits; \ + return zig_builtin##w(popcount, val); \ + } \ +\ + zig_builtin_popcount_common(w) +#else +#define zig_builtin_popcount(w) \ + static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + (void)bits; \ + zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \ + temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \ + temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \ + return temp * (zig_maxInt_u##w / 255) >> (w - 8); \ + } \ +\ + zig_builtin_popcount_common(w) +#endif +zig_builtin_popcount(8) +zig_builtin_popcount(16) +zig_builtin_popcount(32) +zig_builtin_popcount(64) + +#define zig_builtin_ctz_common(w) \ + static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \ + return zig_ctz_u##w((zig_u##w)val, bits); \ + } +#if zig_has_builtin(ctz) || defined(zig_gnuc) +#define zig_builtin_ctz(w) \ + static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + if (val == 0) return bits; \ + return zig_builtin##w(ctz, val); \ + } \ +\ + zig_builtin_ctz_common(w) +#else +#define zig_builtin_ctz(w) \ + static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \ + } \ +\ + zig_builtin_ctz_common(w) +#endif +zig_builtin_ctz(8) +zig_builtin_ctz(16) +zig_builtin_ctz(32) +zig_builtin_ctz(64) + +#define zig_builtin_clz_common(w) \ + static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \ + return zig_clz_u##w((zig_u##w)val, bits); \ + } +#if zig_has_builtin(clz) || defined(zig_gnuc) +#define zig_builtin_clz(w) \ + static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + if (val == 0) return bits; \ + return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) 
- bits); \ + } \ +\ + zig_builtin_clz_common(w) +#else +#define zig_builtin_clz(w) \ + static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \ + } \ +\ + zig_builtin_clz_common(w) +#endif +zig_builtin_clz(8) +zig_builtin_clz(16) +zig_builtin_clz(32) +zig_builtin_clz(64) + +/* ======================== 128-bit Integer Routines ======================== */ + +#if !defined(zig_has_int128) +# if defined(__SIZEOF_INT128__) +# define zig_has_int128 1 +# else +# define zig_has_int128 0 +# endif +#endif + +#if zig_has_int128 + +typedef unsigned __int128 zig_u128; +typedef signed __int128 zig_i128; + +#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) +#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo)) +#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) +#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) +#define zig_hi_u128(val) ((zig_u64)((val) >> 64)) +#define zig_lo_u128(val) ((zig_u64)((val) >> 0)) +#define zig_hi_i128(val) ((zig_i64)((val) >> 64)) +#define zig_lo_i128(val) ((zig_u64)((val) >> 0)) +#define zig_bitcast_u128(val) ((zig_u128)(val)) +#define zig_bitcast_i128(val) ((zig_i128)(val)) +#define zig_cmp_int128(Type) \ + static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (lhs > rhs) - (lhs < rhs); \ + } +#define zig_bit_int128(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return lhs operator rhs; \ + } + +#else /* zig_has_int128 */ + +#if __LITTLE_ENDIAN__ || _MSC_VER +typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128; +typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128; +#else +typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128; +typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; +#endif + +#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) +#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) + +#if _MSC_VER +#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#else +#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) +#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) +#endif +#define zig_hi_u128(val) ((val).hi) +#define zig_lo_u128(val) ((val).lo) +#define zig_hi_i128(val) ((val).hi) +#define zig_lo_i128(val) ((val).lo) +#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo) +#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo) +#define zig_cmp_int128(Type) \ + static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (lhs.hi == rhs.hi) \ + ? 
(lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \ + : (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \ + } +#define zig_bit_int128(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (zig_##Type){ .hi = lhs.hi operator rhs.hi, .lo = lhs.lo operator rhs.lo }; \ + } + +#endif /* zig_has_int128 */ + +#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64) +#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64) +#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64) +#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64) + +zig_cmp_int128(u128) +zig_cmp_int128(i128) + +zig_bit_int128(u128, and, &) +zig_bit_int128(i128, and, &) + +zig_bit_int128(u128, or, |) +zig_bit_int128(i128, or, |) + +zig_bit_int128(u128, xor, ^) +zig_bit_int128(i128, xor, ^) + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs); + +#if zig_has_int128 + +static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { + return val ^ zig_maxInt(u128, bits); +} + +static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { + (void)bits; + return ~val; +} + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { + return lhs >> rhs; +} + +static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { + return lhs << rhs; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { + return lhs << rhs; +} + +static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs + rhs; +} + +static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs + rhs; +} + +static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs - rhs; +} + +static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs - rhs; +} + +static inline zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs * rhs; +} + +static inline zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs * rhs; +} + +static inline zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs / rhs; +} + +static inline zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs / rhs; +} + +static inline zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs % rhs; +} + +static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs % rhs; +} + +static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0)); +} + +static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? 
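+        /* operands differ in sign and rem != 0: add rhs so the result takes the divisor's sign */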
rhs : zig_as_i128(0, 0)); +} + +#else /* zig_has_int128 */ + +static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { + return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +} + +static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { + return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +} + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) }; + return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs }; +} + +static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; + return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; + return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +} + +static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u128 res; + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 res; + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u128 res; + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 res; + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs); +static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs))); +} + +static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return __multi3(lhs, rhs); +} + +zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return __udivti3(lhs, rhs); +}; + +zig_extern zig_i128 __divti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return __divti3(lhs, rhs); +}; + +zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return __umodti3(lhs, rhs); +} + +zig_extern zig_i128 __modti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return __modti3(lhs, rhs); +} + +static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? 
rhs : zig_as_i128(0, 0))); +} + +static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0))); +} + +#endif /* zig_has_int128 */ + +#define zig_div_floor_u128 zig_div_trunc_u128 +#define zig_mod_u128 zig_rem_u128 + +static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_not_u128(zig_and_u128(lhs, rhs), 128); +} + +static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) { + zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0); + return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask); +} + +static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) { + return zig_and_u128(val, zig_maxInt(u128, bits)); +} + +static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) { + return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val)); +} + +static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits); +} + +static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_add_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); +} + +static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); +} + +static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); +} + +#if zig_has_int128 + +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) + zig_u128 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); +#else + *res = zig_addw_u128(lhs, rhs, bits); + return *res < lhs; +#endif +} + +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool 
zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) + zig_i128 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) + zig_u128 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); +#else + *res = zig_subw_u128(lhs, rhs, bits); + return *res > lhs; +#endif +} + +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) + zig_i128 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); +} + +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) + zig_u128 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); +#else + *res = zig_mulw_u128(lhs, rhs, bits); + return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs; +#endif +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) + zig_i128 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); +} + +#else /* zig_has_int128 */ + +static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) { + return overflow || + zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) || + zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0); +} + +static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) { + return overflow || + zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) || + zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0); +} + +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 full_res; + bool overflow = + zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | + zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); + *res = zig_wrap_u128(full_res, bits); + return zig_overflow_u128(overflow, full_res, bits); +} + +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 
rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 full_res; + bool overflow = + zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | + zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); + *res = zig_wrap_u128(full_res, bits); + return zig_overflow_u128(overflow, full_res, bits); +} + +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + *res = zig_mulw_u128(lhs, rhs, bits); + return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) && + zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +#endif /* zig_has_int128 */ + +static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { + *res = zig_shlw_u128(lhs, rhs, bits); + return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); +} + +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { + *res = zig_shlw_i128(lhs, rhs, bits); + zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1))); + return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && + zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); +} + +static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0)) + return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs; + +#if zig_has_int128 + return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res; +#else + return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res; +#endif +} + +static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res; + return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; +} + +static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (!zig_addo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? 
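NOTE: on signed addition overflow the wrapped result always has the opposite
sign of the mathematically correct sum (overflow needs same-sign operands),
so a non-negative wrapped value means the true sum was negative and the add
saturates to minInt, and vice versa. The same shape on 8 bits, assuming a
compiler with __builtin_add_overflow (illustrative, not part of the patch):

    #include <stdint.h>
    #include <assert.h>

    static int8_t adds_i8(int8_t lhs, int8_t rhs) {
        int8_t res;
        if (!__builtin_add_overflow(lhs, rhs, &res)) return res;
        return res >= 0 ? INT8_MIN : INT8_MAX; /* wrapped sign is inverted */
    }

    int main(void) {
        assert(adds_i8(100, 100) == INT8_MAX);   /* 200 wraps to -56 */
        assert(adds_i8(-100, -100) == INT8_MIN); /* -200 wraps to 56 */
        return 0;
    }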
zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res; +} + +static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (!zig_subo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; +} + +static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) { + if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits); + if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64)); + return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64)); +} + +static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) { + return zig_clz_u128(zig_bitcast_u128(val), bits); +} + +static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) { + if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64)); + return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64); +} + +static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) { + return zig_ctz_u128(zig_bitcast_u128(val), bits); +} + +static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) { + return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + + zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64)); +} + +static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) { + return zig_popcount_u128(zig_bitcast_u128(val), bits); +} + +static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) { + zig_u128 full_res; +#if zig_has_builtin(bswap128) + full_res = __builtin_bswap128(val); +#else + full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)), + zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64))); +#endif + return zig_shr_u128(full_res, zig_as_u8(128) - bits); +} + +static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) { + return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits)); +} + +static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) { + return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)), + zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))), + zig_as_u8(128) - bits); +} + +static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { + return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits)); +} + +/* ========================= Floating Point Support ========================= */ + +#if _MSC_VER +#define zig_msvc_flt_inf ((double)(1e+300 * 1e+300)) +#define zig_msvc_flt_inff ((float)(1e+300 * 1e+300)) +#define zig_msvc_flt_infl ((long double)(1e+300 * 1e+300)) +#define zig_msvc_flt_nan ((double)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanf ((float)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanl ((long double)(zig_msvc_flt_inf * 0.f)) +#define __builtin_nan(str) nan(str) +#define __builtin_nanf(str) nanf(str) +#define 
__builtin_nanl(str) nanl(str) +#define __builtin_inf() zig_msvc_flt_inf +#define __builtin_inff() zig_msvc_flt_inff +#define __builtin_infl() zig_msvc_flt_infl +#endif + +#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc) +#define zig_has_float_builtins 1 +#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg) +#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg) +#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg) +#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg) +#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg) +#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg) +#else +#define zig_has_float_builtins 0 +#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) +#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) +#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) +#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) +#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) +#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) +#endif + +#define zig_has_f16 1 +#define zig_bitSizeOf_f16 16 +#define zig_libc_name_f16(name) __##name##h +#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr) +#if FLT_MANT_DIG == 11 +typedef float zig_f16; +#define zig_as_f16(fp, repr) fp##f +#elif DBL_MANT_DIG == 11 +typedef double zig_f16; +#define zig_as_f16(fp, repr) fp +#elif LDBL_MANT_DIG == 11 +#define zig_bitSizeOf_c_longdouble 16 +typedef long double zig_f16; +#define zig_as_f16(fp, repr) fp##l +#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc)) +typedef _Float16 zig_f16; +#define zig_as_f16(fp, repr) fp##f16 +#elif defined(__SIZEOF_FP16__) +typedef __fp16 zig_f16; +#define zig_as_f16(fp, repr) fp##f16 +#else +#undef zig_has_f16 +#define zig_has_f16 0 +#define zig_repr_f16 i16 +typedef zig_i16 zig_f16; +#define zig_as_f16(fp, repr) repr +#undef zig_as_special_f16 +#define zig_as_special_f16(sign, name, arg, repr) repr +#undef zig_as_special_constant_f16 +#define zig_as_special_constant_f16(sign, name, arg, repr) repr +#endif + +#define zig_has_f32 1 +#define zig_bitSizeOf_f32 32 +#define zig_libc_name_f32(name) name##f +#if _MSC_VER +#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, ) +#else +#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr) +#endif +#if FLT_MANT_DIG == 24 +typedef float zig_f32; +#define zig_as_f32(fp, repr) fp##f +#elif DBL_MANT_DIG == 24 +typedef double zig_f32; +#define zig_as_f32(fp, repr) fp +#elif LDBL_MANT_DIG == 24 +#define zig_bitSizeOf_c_longdouble 32 +typedef long double zig_f32; +#define zig_as_f32(fp, repr) fp##l +#elif FLT32_MANT_DIG == 24 +typedef _Float32 zig_f32; +#define zig_as_f32(fp, repr) fp##f32 +#else +#undef zig_has_f32 +#define zig_has_f32 0 +#define zig_repr_f32 i32 +typedef zig_i32 zig_f32; +#define zig_as_f32(fp, repr) repr +#undef zig_as_special_f32 +#define zig_as_special_f32(sign, name, arg, repr) repr +#undef zig_as_special_constant_f32 +#define zig_as_special_constant_f32(sign, name, arg, repr) repr +#endif + 
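NOTE: each zig_f* type is chosen by matching mantissa precision from
<float.h> rather than by size: 11 mantissa digits identifies IEEE binary16,
24 binary32, 53 binary64, 64 the x87 extended format, and 113 binary128.
When no host type matches, the zig_f* type degrades to a same-sized integer
and the operations fall back to compiler-rt. A quick probe of what a host C
compiler offers (illustrative, not part of the patch):

    #include <float.h>
    #include <stdio.h>

    int main(void) {
        printf("float:       %d mantissa digits%s\n", FLT_MANT_DIG,
               FLT_MANT_DIG == 24 ? " (IEEE binary32)" : "");
        printf("double:      %d mantissa digits%s\n", DBL_MANT_DIG,
               DBL_MANT_DIG == 53 ? " (IEEE binary64)" : "");
        printf("long double: %d mantissa digits\n", LDBL_MANT_DIG);
        return 0;
    }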
+#define zig_has_f64 1 +#define zig_bitSizeOf_f64 64 +#define zig_libc_name_f64(name) name +#if _MSC_VER +#ifdef ZIG_TARGET_ABI_MSVC +#define zig_bitSizeOf_c_longdouble 64 +#endif +#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, ) +#else /* _MSC_VER */ +#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr) +#endif /* _MSC_VER */ +#if FLT_MANT_DIG == 53 +typedef float zig_f64; +#define zig_as_f64(fp, repr) fp##f +#elif DBL_MANT_DIG == 53 +typedef double zig_f64; +#define zig_as_f64(fp, repr) fp +#elif LDBL_MANT_DIG == 53 +#define zig_bitSizeOf_c_longdouble 64 +typedef long double zig_f64; +#define zig_as_f64(fp, repr) fp##l +#elif FLT64_MANT_DIG == 53 +typedef _Float64 zig_f64; +#define zig_as_f64(fp, repr) fp##f64 +#elif FLT32X_MANT_DIG == 53 +typedef _Float32x zig_f64; +#define zig_as_f64(fp, repr) fp##f32x +#else +#undef zig_has_f64 +#define zig_has_f64 0 +#define zig_repr_f64 i64 +typedef zig_i64 zig_f64; +#define zig_as_f64(fp, repr) repr +#undef zig_as_special_f64 +#define zig_as_special_f64(sign, name, arg, repr) repr +#undef zig_as_special_constant_f64 +#define zig_as_special_constant_f64(sign, name, arg, repr) repr +#endif + +#define zig_has_f80 1 +#define zig_bitSizeOf_f80 80 +#define zig_libc_name_f80(name) __##name##x +#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr) +#if FLT_MANT_DIG == 64 +typedef float zig_f80; +#define zig_as_f80(fp, repr) fp##f +#elif DBL_MANT_DIG == 64 +typedef double zig_f80; +#define zig_as_f80(fp, repr) fp +#elif LDBL_MANT_DIG == 64 +#define zig_bitSizeOf_c_longdouble 80 +typedef long double zig_f80; +#define zig_as_f80(fp, repr) fp##l +#elif FLT80_MANT_DIG == 64 +typedef _Float80 zig_f80; +#define zig_as_f80(fp, repr) fp##f80 +#elif FLT64X_MANT_DIG == 64 +typedef _Float64x zig_f80; +#define zig_as_f80(fp, repr) fp##f64x +#elif defined(__SIZEOF_FLOAT80__) +typedef __float80 zig_f80; +#define zig_as_f80(fp, repr) fp##l +#else +#undef zig_has_f80 +#define zig_has_f80 0 +#define zig_repr_f80 i128 +typedef zig_i128 zig_f80; +#define zig_as_f80(fp, repr) repr +#undef zig_as_special_f80 +#define zig_as_special_f80(sign, name, arg, repr) repr +#undef zig_as_special_constant_f80 +#define zig_as_special_constant_f80(sign, name, arg, repr) repr +#endif + +#define zig_has_f128 1 +#define zig_bitSizeOf_f128 128 +#define zig_libc_name_f128(name) name##q +#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr) +#if FLT_MANT_DIG == 113 +typedef float zig_f128; +#define zig_as_f128(fp, repr) fp##f +#elif DBL_MANT_DIG == 113 +typedef double zig_f128; +#define zig_as_f128(fp, repr) fp +#elif LDBL_MANT_DIG == 113 +#define zig_bitSizeOf_c_longdouble 128 +typedef long double zig_f128; +#define zig_as_f128(fp, repr) fp##l +#elif FLT128_MANT_DIG == 113 +typedef _Float128 zig_f128; +#define zig_as_f128(fp, repr) fp##f128 +#elif FLT64X_MANT_DIG == 113 +typedef _Float64x zig_f128; +#define zig_as_f128(fp, repr) fp##f64x +#elif defined(__SIZEOF_FLOAT128__) +typedef __float128 zig_f128; +#define zig_as_f128(fp, repr) fp##q +#undef zig_as_special_f128 +#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) +#else +#undef zig_has_f128 +#define zig_has_f128 0 +#define zig_repr_f128 i128 +typedef zig_i128 zig_f128; +#define zig_as_f128(fp, repr) repr +#undef zig_as_special_f128 +#define zig_as_special_f128(sign, name, arg, repr) repr +#undef 
zig_as_special_constant_f128 +#define zig_as_special_constant_f128(sign, name, arg, repr) repr +#endif + +#define zig_has_c_longdouble 1 + +#ifdef ZIG_TARGET_ABI_MSVC +#define zig_libc_name_c_longdouble(name) name +#else +#define zig_libc_name_c_longdouble(name) name##l +#endif + +#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr) +#ifdef zig_bitSizeOf_c_longdouble + +#ifdef ZIG_TARGET_ABI_MSVC +typedef double zig_c_longdouble; +#undef zig_bitSizeOf_c_longdouble +#define zig_bitSizeOf_c_longdouble 64 +#define zig_as_c_longdouble(fp, repr) fp +#else +typedef long double zig_c_longdouble; +#define zig_as_c_longdouble(fp, repr) fp##l +#endif + +#else /* zig_bitSizeOf_c_longdouble */ + +#undef zig_has_c_longdouble +#define zig_has_c_longdouble 0 +#define zig_bitSizeOf_c_longdouble 80 +#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80 +#define zig_repr_c_longdouble i128 +typedef zig_i128 zig_c_longdouble; +#define zig_as_c_longdouble(fp, repr) repr +#undef zig_as_special_c_longdouble +#define zig_as_special_c_longdouble(sign, name, arg, repr) repr +#undef zig_as_special_constant_c_longdouble +#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr + +#endif /* zig_bitSizeOf_c_longdouble */ + +#if !zig_has_float_builtins +#define zig_float_from_repr(Type, ReprType) \ + static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \ + return *((zig_##Type*)&repr); \ + } + +zig_float_from_repr(f16, u16) +zig_float_from_repr(f32, u32) +zig_float_from_repr(f64, u64) +zig_float_from_repr(f80, u128) +zig_float_from_repr(f128, u128) +#if zig_bitSizeOf_c_longdouble == 80 +zig_float_from_repr(c_longdouble, u128) +#else +#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType) +zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble)) +#endif +#endif + +#define zig_cast_f16 (zig_f16) +#define zig_cast_f32 (zig_f32) +#define zig_cast_f64 (zig_f64) + +#if _MSC_VER && !zig_has_f128 +#define zig_cast_f80 +#define zig_cast_c_longdouble +#define zig_cast_f128 +#else +#define zig_cast_f80 (zig_f80) +#define zig_cast_c_longdouble (zig_c_longdouble) +#define zig_cast_f128 (zig_f128) +#endif + +#define zig_convert_builtin(ResType, operation, ArgType, version) \ + zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType); +zig_convert_builtin(f16, trunc, f32, 2) +zig_convert_builtin(f16, trunc, f64, 2) +zig_convert_builtin(f16, trunc, f80, 2) +zig_convert_builtin(f16, trunc, f128, 2) +zig_convert_builtin(f32, extend, f16, 2) +zig_convert_builtin(f32, trunc, f64, 2) +zig_convert_builtin(f32, trunc, f80, 2) +zig_convert_builtin(f32, trunc, f128, 2) +zig_convert_builtin(f64, extend, f16, 2) +zig_convert_builtin(f64, extend, f32, 2) +zig_convert_builtin(f64, trunc, f80, 2) +zig_convert_builtin(f64, trunc, f128, 2) +zig_convert_builtin(f80, extend, f16, 2) +zig_convert_builtin(f80, extend, f32, 2) +zig_convert_builtin(f80, extend, f64, 2) +zig_convert_builtin(f80, trunc, f128, 2) +zig_convert_builtin(f128, extend, f16, 2) +zig_convert_builtin(f128, extend, f32, 2) +zig_convert_builtin(f128, extend, f64, 2) +zig_convert_builtin(f128, extend, f80, 2) + +#define zig_float_negate_builtin_0(Type) \ + static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ + return zig_expand_concat(zig_xor_, 
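NOTE: the zig_convert_builtin declarations just above paste together
compiler-rt's naming scheme: operation, source-type abbreviation,
destination-type abbreviation, and an optional ABI version suffix. Assuming
the usual compiler-rt abbreviations (hf = f16, sf = f32, df = f64,
tf = f128, di = i64), the expansions look roughly like:

    /* zig_convert_builtin(f32, extend, f16, 2) declares roughly: */
    zig_extern zig_f32 __extendhfsf2(zig_f16);

    /* zig_convert_builtin(i64, fix, f64, ) declares roughly: */
    zig_extern zig_i64 __fixdfdi(zig_f64);

These are the same libcalls LLVM would emit for the corresponding
conversions, so the C backend and the self-hosted backends share one runtime.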
zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \ + } +#define zig_float_negate_builtin_1(Type) \ + static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ + return -arg; \ + } + +#define zig_float_less_builtin_0(Type, operation) \ + zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \ + static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \ + } +#define zig_float_less_builtin_1(Type, operation) \ + static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (!(lhs <= rhs) - (lhs < rhs)); \ + } + +#define zig_float_greater_builtin_0(Type, operation) \ + zig_float_less_builtin_0(Type, operation) +#define zig_float_greater_builtin_1(Type, operation) \ + static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return ((lhs > rhs) - !(lhs >= rhs)); \ + } + +#define zig_float_binary_builtin_0(Type, operation, operator) \ + zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \ + } +#define zig_float_binary_builtin_1(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return lhs operator rhs; \ + } + +#define zig_float_builtins(Type) \ + zig_convert_builtin(i32, fix, Type, ) \ + zig_convert_builtin(u32, fixuns, Type, ) \ + zig_convert_builtin(i64, fix, Type, ) \ + zig_convert_builtin(u64, fixuns, Type, ) \ + zig_convert_builtin(i128, fix, Type, ) \ + zig_convert_builtin(u128, fixuns, Type, ) \ + zig_convert_builtin(Type, float, i32, ) \ + zig_convert_builtin(Type, floatun, u32, ) \ + zig_convert_builtin(Type, float, i64, ) \ + zig_convert_builtin(Type, floatun, u64, ) \ + zig_convert_builtin(Type, float, i128, ) \ + zig_convert_builtin(Type, floatun, u128, ) \ + zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, eq) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, lt) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, le) \ + zig_expand_concat(zig_float_greater_builtin_, zig_has_##Type)(Type, gt) \ + zig_expand_concat(zig_float_greater_builtin_, zig_has_##Type)(Type, ge) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, add, +) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, sub, -) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, mul, *) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, div, /) \ + zig_extern zig_##Type zig_libc_name_##Type(sqrt)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(sin)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(cos)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(tan)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(exp)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(exp2)(zig_##Type); \ + zig_extern 
zig_##Type zig_libc_name_##Type(log)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(log2)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(log10)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fabs)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(floor)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(ceil)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(round)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(trunc)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fmod)(zig_##Type, zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fmin)(zig_##Type, zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fmax)(zig_##Type, zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fma)(zig_##Type, zig_##Type, zig_##Type); \ +\ + static inline zig_##Type zig_div_trunc_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_libc_name_##Type(trunc)(zig_div_##Type(lhs, rhs)); \ + } \ +\ + static inline zig_##Type zig_div_floor_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_libc_name_##Type(floor)(zig_div_##Type(lhs, rhs)); \ + } \ +\ + static inline zig_##Type zig_mod_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_sub_##Type(lhs, zig_mul_##Type(zig_div_floor_##Type(lhs, rhs), rhs)); \ + } +zig_float_builtins(f16) +zig_float_builtins(f32) +zig_float_builtins(f64) +zig_float_builtins(f80) +zig_float_builtins(f128) +zig_float_builtins(c_longdouble) + +#if _MSC_VER && (_M_IX86 || _M_X64) + +// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 + +#define zig_msvc_atomics(Type, suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + zig_##Type comparand = *expected; \ + zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = initial; \ + } \ + return exchanged; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedExchange##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedExchangeAdd##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedOr##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedXor##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedAnd##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = ~(prev & value); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type 
zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value < prev ? value : prev; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value > prev ? value : prev; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + _InterlockedExchange##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \ + return _InterlockedOr##suffix(obj, 0); \ + } + +zig_msvc_atomics(u8, 8) +zig_msvc_atomics(i8, 8) +zig_msvc_atomics(u16, 16) +zig_msvc_atomics(i16, 16) +zig_msvc_atomics(u32, ) +zig_msvc_atomics(i32, ) + +#if _M_X64 +zig_msvc_atomics(u64, 64) +zig_msvc_atomics(i64, 64) +#endif + +#define zig_msvc_flt_atomics(Type, ReprType, suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + zig_##ReprType comparand = *((zig_##ReprType*)expected); \ + zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = *((zig_##Type*)&initial); \ + } \ + return exchanged; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \ + return *((zig_##Type*)&initial); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##ReprType new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev + value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##ReprType new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + } \ + return prev; \ + } + +zig_msvc_flt_atomics(f32, u32, ) +#if _M_X64 +zig_msvc_flt_atomics(f64, u64, 64) +#endif + +#if _M_IX86 +static inline void zig_msvc_atomic_barrier() { + zig_i32 barrier; + __asm { + xchg barrier, eax + } +} + +static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) { + _InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_p32(void** obj) { + return (void*)_InterlockedOr((void*)obj, 0); +} + +static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desired) { + void* comparand = *expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool exchanged = initial == comparand; + if (!exchanged) { + *expected = initial; + } + return exchanged; +} +#else /* _M_IX86 */ +static inline 
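NOTE: MSVC's Interlocked API only covers xchg/add/or/xor/and directly, so the
macros above synthesize the remaining read-modify-write ops (sub, nand, min,
max, and the float add/sub) with a compare-exchange retry loop: snapshot,
compute, publish, retry if another thread raced. The same loop with
GCC/Clang __atomic builtins standing in for the Interlocked calls
(illustrative, not part of the patch):

    #include <stdint.h>

    static uint32_t atomic_min_u32(uint32_t volatile *obj, uint32_t value) {
        uint32_t prev = __atomic_load_n(obj, __ATOMIC_SEQ_CST);
        for (;;) {
            uint32_t next = value < prev ? value : prev;
            /* On failure, prev is reloaded with the current *obj. */
            if (__atomic_compare_exchange_n(obj, &prev, next, 0,
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                return prev;
        }
    }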
void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) { + _InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_p64(void** obj) { + return (void*)_InterlockedOr64((void*)obj, 0); +} + +static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desired) { + void* comparand = *expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool exchanged = initial == comparand; + if (!exchanged) { + *expected = initial; + } + return exchanged; +} + +static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) { + return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected); +} + +static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { + return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected); +} + +#define zig_msvc_atomics_128xchg(Type) \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, value); \ + } \ + return prev; \ + } + +zig_msvc_atomics_128xchg(u128) +zig_msvc_atomics_128xchg(i128) + +#define zig_msvc_atomics_128op(Type, operation) \ + static inline zig_##Type zig_msvc_atomicrmw_##operation##_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = zig_##operation##_##Type(prev, value); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } + +zig_msvc_atomics_128op(u128, add) +zig_msvc_atomics_128op(u128, sub) +zig_msvc_atomics_128op(u128, or) +zig_msvc_atomics_128op(u128, xor) +zig_msvc_atomics_128op(u128, and) +zig_msvc_atomics_128op(u128, nand) +zig_msvc_atomics_128op(u128, min) +zig_msvc_atomics_128op(u128, max) +#endif /* _M_IX86 */ + +#endif /* _MSC_VER && (_M_IX86 || _M_X64) */ + +/* ========================= Special Case Intrinsics ========================= */ + +#if (_MSC_VER && _M_X64) || defined(__x86_64__) + +static inline void* zig_x86_64_windows_teb(void) { +#if _MSC_VER + return (void*)__readgsqword(0x30); +#else + void* teb; + __asm volatile(" movq %%gs:0x30, %[ptr]": [ptr]"=r"(teb)::); + return teb; +#endif +} + +#elif (_MSC_VER && _M_IX86) || defined(__i386__) || defined(__X86__) + +static inline void* zig_x86_windows_teb(void) { +#if _MSC_VER + return (void*)__readfsdword(0x18); +#else + void* teb; + __asm volatile(" movl %%fs:0x18, %[ptr]": [ptr]"=r"(teb)::); + return teb; +#endif +} + +#endif + +#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__) + +static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) { + zig_u32 cpu_info[4]; +#if _MSC_VER + __cpuidex(cpu_info, leaf_id, subid); +#else + __cpuid_count(leaf_id, subid, cpu_info[0], cpu_info[1], cpu_info[2], cpu_info[3]); +#endif + *eax = cpu_info[0]; + *ebx = cpu_info[1]; + *ecx = cpu_info[2]; + *edx = cpu_info[3]; +} + +static inline zig_u32 zig_x86_get_xcr0(void) { +#if _MSC_VER + return (zig_u32)_xgetbv(0); +#else + zig_u32 eax; + zig_u32 edx; + __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); + 
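NOTE: XCR0 reports which register states the OS has enabled via XSETBV;
feature detection needs it because CPUID alone only says the hardware
supports AVX, not that the kernel saves YMM state across context switches.
Typical use of the helper being defined here (illustrative fragment, not
part of the patch):

    zig_u32 xcr0 = zig_x86_get_xcr0();
    int os_avx = (xcr0 & 0x6) == 0x6; /* bit 1: SSE state, bit 2: AVX state */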
return eax; +#endif +} + +#endif -- cgit v1.2.3 From cf7200e8f9c995bae8bedaf3c727fe710a93f1e9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 18 Feb 2023 23:03:11 -0500 Subject: CBE: remove typedef data structures Adds a new mechanism for `@tagName` function generation that doesn't piggyback on the removed typedef system. --- src/Compilation.zig | 7 +- src/codegen/c.zig | 643 +++++++------------------------------------------ src/codegen/c/type.zig | 22 +- src/link/C.zig | 274 +++++++++++---------- 4 files changed, 241 insertions(+), 705 deletions(-) (limited to 'src/codegen') diff --git a/src/Compilation.zig b/src/Compilation.zig index 97153da88b..9359d24dc3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3277,14 +3277,9 @@ fn processOneJob(comp: *Compilation, job: Job) !void { .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = .{}, - .typedefs = c_codegen.TypedefMap.initContext(gpa, .{ .mod = module }), - .typedefs_arena = ctypes_arena.allocator(), }; defer { - for (dg.typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - dg.typedefs.deinit(); + dg.ctypes.deinit(gpa); dg.fwd_decl.deinit(); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index bed3a37a5c..cf4c7ec21b 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -23,7 +23,7 @@ const libcFloatSuffix = target_util.libcFloatSuffix; const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; -const Mutability = enum { Const, ConstArgument, Mut }; +const Mutability = enum { @"const", mut }; const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -63,12 +63,17 @@ const TypedefKind = enum { }; pub const CValueMap = std.AutoHashMap(Air.Inst.Ref, CValue); -pub const TypedefMap = std.ArrayHashMap( - Type, - struct { name: []const u8, rendered: []u8 }, - Type.HashContext32, - true, -); + +pub const LazyFnKey = union(enum) { + tag_name: Decl.Index, +}; +pub const LazyFnValue = struct { + fn_name: []const u8, + data: union { + tag_name: Type, + }, +}; +pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue); const LoopDepth = u16; const Local = struct { @@ -83,11 +88,6 @@ const LocalsList = std.ArrayListUnmanaged(LocalIndex); const LocalsMap = std.ArrayHashMapUnmanaged(Type, LocalsList, Type.HashContext32, true); const LocalsStack = std.ArrayListUnmanaged(LocalsMap); -const FormatTypeAsCIdentContext = struct { - ty: Type, - mod: *Module, -}; - const ValueRenderLocation = enum { FunctionArgument, Initializer, @@ -108,26 +108,6 @@ const BuiltinInfo = enum { Bits, }; -fn formatTypeAsCIdentifier( - data: FormatTypeAsCIdentContext, - comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, -) !void { - var stack = std.heap.stackFallback(128, data.mod.gpa); - const allocator = stack.get(); - const str = std.fmt.allocPrint(allocator, "{}", .{data.ty.fmt(data.mod)}) catch ""; - defer allocator.free(str); - return formatIdent(str, fmt, options, writer); -} - -pub fn typeToCIdentifier(ty: Type, mod: *Module) std.fmt.Formatter(formatTypeAsCIdentifier) { - return .{ .data = .{ - .ty = ty, - .mod = mod, - } }; -} - const reserved_idents = std.ComptimeStringMap(void, .{ // C language .{ "alignas", { @@ -283,6 +263,7 @@ pub const Function = struct { next_arg_index: usize = 0, next_block_index: usize = 0, object: Object, + lazy_fns: LazyFnMap, func: *Module.Fn, /// All the locals, to be emitted at the top of the function. 
locals: std.ArrayListUnmanaged(Local) = .{}, @@ -319,7 +300,7 @@ pub const Function = struct { const gpa = f.object.dg.gpa; try f.allocs.put(gpa, decl_c_value.local, true); try writer.writeAll("static "); - try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .Const, alignment, .Complete); + try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .@"const", alignment, .Complete); try writer.writeAll(" = "); try f.object.dg.renderValue(writer, ty, val, .StaticInitializer); try writer.writeAll(";\n "); @@ -353,7 +334,7 @@ pub const Function = struct { } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { - const result = try f.allocAlignedLocal(ty, .Mut, 0); + const result = try f.allocAlignedLocal(ty, .mut, 0); log.debug("%{d}: allocating t{d}", .{ inst, result.local }); return result; } @@ -448,6 +429,29 @@ pub const Function = struct { return f.object.dg.fmtIntLiteral(ty, val); } + fn getTagNameFn(f: *Function, enum_ty: Type) ![]const u8 { + const gpa = f.object.dg.gpa; + const owner_decl = enum_ty.getOwnerDecl(); + + const gop = try f.lazy_fns.getOrPut(gpa, .{ .tag_name = owner_decl }); + if (!gop.found_existing) { + errdefer _ = f.lazy_fns.pop(); + + var promoted = f.object.dg.ctypes.promote(gpa); + defer f.object.dg.ctypes.demote(promoted); + const arena = promoted.arena.allocator(); + + gop.value_ptr.* = .{ + .fn_name = try std.fmt.allocPrint(arena, "zig_tagName_{}__{d}", .{ + fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), + @enumToInt(owner_decl), + }), + .data = .{ .tag_name = try enum_ty.copy(arena) }, + }; + } + return gop.value_ptr.fn_name; + } + pub fn deinit(f: *Function) void { const gpa = f.object.dg.gpa; f.allocs.deinit(gpa); @@ -458,11 +462,8 @@ pub const Function = struct { f.free_locals_stack.deinit(gpa); f.blocks.deinit(gpa); f.value_map.deinit(); + f.lazy_fns.deinit(gpa); f.object.code.deinit(); - for (f.object.dg.typedefs.values()) |typedef| { - gpa.free(typedef.rendered); - } - f.object.dg.typedefs.deinit(); f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); f.arena.deinit(); @@ -492,9 +493,6 @@ pub const DeclGen = struct { fwd_decl: std.ArrayList(u8), error_msg: ?*Module.ErrorMsg, ctypes: CType.Store, - /// The key of this map is Type which has references to typedefs_arena. 
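NOTE: getTagNameFn above is the replacement for the removed typedef-based
path: the first request for an enum's @tagName interns a LazyFnMap entry
keyed by the owning decl and hands back a stable mangled name, and the body
is rendered once, later, from the map. The emitted C looks roughly like the
following for a two-field enum (the names, the decl index, and the slice
struct are hypothetical, sketched from the renderTagNameFn code further
down):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { const char *ptr; size_t len; } slice_const_u8;

    static slice_const_u8 zig_tagName_E__42(uint8_t tag) {
        switch (tag) {
        case 0: { static const char name[] = "a"; slice_const_u8 s = { name, 1 }; return s; }
        case 1: { static const char name[] = "b"; slice_const_u8 s = { name, 2 }; return s; }
        default: abort(); /* unreachable for a valid tag */
        }
    }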
- typedefs: TypedefMap, - typedefs_arena: std.mem.Allocator, fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); @@ -504,14 +502,6 @@ pub const DeclGen = struct { return error.AnalysisFail; } - fn getTypedefName(dg: *DeclGen, t: Type) ?[]const u8 { - if (dg.typedefs.get(t)) |typedef| { - return typedef.name; - } else { - return null; - } - } - fn renderDeclValue( dg: *DeclGen, writer: anytype, @@ -1493,7 +1483,7 @@ pub const DeclGen = struct { if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; if (index > 0) try w.writeAll(", "); const name = CValue{ .arg = index }; - try dg.renderTypeAndName(w, param_type, name, .ConstArgument, 0, kind); + try dg.renderTypeAndName(w, param_type, name, .@"const", 0, kind); index += 1; } @@ -1507,453 +1497,6 @@ pub const DeclGen = struct { if (fn_info.alignment > 0 and kind == .Forward) try w.print(" zig_align_fn({})", .{fn_info.alignment}); } - fn renderPtrToFnTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - const fn_info = t.fnInfo(); - - const target = dg.module.getTarget(); - var ret_buf: LowerFnRetTyBuffer = undefined; - const ret_ty = lowerFnRetTy(fn_info.return_type, &ret_buf, target); - - try bw.writeAll("typedef "); - try dg.renderType(bw, ret_ty, .Forward); - try bw.writeAll(" (*"); - const name_begin = buffer.items.len; - try bw.print("zig_F_{}", .{typeToCIdentifier(t, dg.module)}); - const name_end = buffer.items.len; - try bw.writeAll(")("); - - const param_len = fn_info.param_types.len; - - var params_written: usize = 0; - var index: usize = 0; - while (index < param_len) : (index += 1) { - const param_ty = fn_info.param_types[index]; - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; - if (params_written > 0) { - try bw.writeAll(", "); - } - try dg.renderTypeAndName(bw, param_ty, .{ .bytes = "" }, .Mut, 0, .Forward); - params_written += 1; - } - - if (fn_info.is_var_args) { - if (params_written != 0) try bw.writeAll(", "); - try bw.writeAll("..."); - } else if (params_written == 0) { - try dg.renderType(bw, Type.void, .Forward); - } - try bw.writeAll(");\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderSliceTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - std.debug.assert(t.sentinel() == null); // expected canonical type - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - var ptr_ty_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = t.slicePtrFieldType(&ptr_ty_buf); - const ptr_name = CValue{ .identifier = "ptr" }; - const len_ty = Type.usize; - const len_name = CValue{ .identifier = "len" }; - - try bw.writeAll("typedef struct {\n "); - try dg.renderTypeAndName(bw, ptr_ty, ptr_name, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, len_ty, len_name, .Mut, 0, .Complete); - - try bw.writeAll(";\n} "); - const name_begin = buffer.items.len; - try bw.print("zig_{c}_{}", .{ - @as(u8, if (t.isConstPtr()) 'L' else 'M'), - typeToCIdentifier(t.childType(), dg.module), - }); - const 
name_end = buffer.items.len; - try bw.writeAll(";\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderFwdTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - // The forward declaration for T is stored with a key of *const T. - const child_ty = t.childType(); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - const tag = switch (child_ty.zigTypeTag()) { - .Struct, .ErrorUnion, .Optional => "struct", - .Union => if (child_ty.unionTagTypeSafety()) |_| "struct" else "union", - else => unreachable, - }; - try bw.writeAll("typedef "); - try bw.writeAll(tag); - const name_begin = buffer.items.len + " ".len; - try bw.writeAll(" zig_"); - switch (child_ty.zigTypeTag()) { - .Struct, .Union => { - var fqn_buf = std.ArrayList(u8).init(dg.typedefs.allocator); - defer fqn_buf.deinit(); - - const owner_decl_index = child_ty.getOwnerDecl(); - const owner_decl = dg.module.declPtr(owner_decl_index); - try owner_decl.renderFullyQualifiedName(dg.module, fqn_buf.writer()); - - try bw.print("S_{}__{d}", .{ fmtIdent(fqn_buf.items), @enumToInt(owner_decl_index) }); - }, - .ErrorUnion => { - try bw.print("E_{}", .{typeToCIdentifier(child_ty.errorUnionPayload(), dg.module)}); - }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - try bw.print("Q_{}", .{typeToCIdentifier(child_ty.optionalChild(&opt_buf), dg.module)}); - }, - else => unreachable, - } - const name_end = buffer.items.len; - try buffer.ensureUnusedCapacity(" ".len + (name_end - name_begin) + ";\n".len); - buffer.appendAssumeCapacity(' '); - buffer.appendSliceAssumeCapacity(buffer.items[name_begin..name_end]); - buffer.appendSliceAssumeCapacity(";\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderStructTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.appendSlice("struct "); - - var needs_pack_attr = false; - { - var it = t.structFields().iterator(); - while (it.next()) |field| { - const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - const alignment = field.value_ptr.abi_align; - if (alignment != 0 and alignment < field_ty.abiAlignment(dg.module.getTarget())) { - needs_pack_attr = true; - try buffer.appendSlice("zig_packed("); - break; - } - } - } - - try buffer.appendSlice(name); - try buffer.appendSlice(" {\n"); - { - var it = t.structFields().iterator(); - var empty = true; - while (it.next()) |field| { - const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - - const alignment = field.value_ptr.alignment(dg.module.getTarget(), 
t.containerLayout()); - const field_name = CValue{ .identifier = field.key_ptr.* }; - try buffer.append(' '); - try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete); - try buffer.appendSlice(";\n"); - - empty = false; - } - if (empty) try buffer.appendSlice(" char empty_struct;\n"); - } - if (needs_pack_attr) try buffer.appendSlice("});\n") else try buffer.appendSlice("};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderTupleTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.appendSlice("typedef struct {\n"); - { - const fields = t.tupleFields(); - var field_id: usize = 0; - for (fields.types, 0..) |field_ty, i| { - if (!field_ty.hasRuntimeBits() or fields.values[i].tag() != .unreachable_value) continue; - - try buffer.append(' '); - try dg.renderTypeAndName(buffer.writer(), field_ty, .{ .field = field_id }, .Mut, 0, .Complete); - try buffer.appendSlice(";\n"); - - field_id += 1; - } - if (field_id == 0) try buffer.appendSlice(" char empty_tuple;\n"); - } - const name_begin = buffer.items.len + "} ".len; - try buffer.writer().print("}} zig_T_{}_{d};\n", .{ typeToCIdentifier(t, dg.module), @truncate(u16, t.hash(dg.module)) }); - const name_end = buffer.items.len - ";\n".len; - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.appendSlice(if (t.unionTagTypeSafety()) |_| "struct " else "union "); - try buffer.appendSlice(name); - try buffer.appendSlice(" {\n"); - - const indent = if (t.unionTagTypeSafety()) |tag_ty| indent: { - const target = dg.module.getTarget(); - const layout = t.unionGetLayout(target); - if (layout.tag_size != 0) { - try buffer.append(' '); - try dg.renderTypeAndName(buffer.writer(), tag_ty, .{ .identifier = "tag" }, .Mut, 0, .Complete); - try buffer.appendSlice(";\n"); - } - try buffer.appendSlice(" union {\n"); - break :indent " "; - } else " "; - - { - var it = t.unionFields().iterator(); - var empty = true; - while (it.next()) |field| { - const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - - const alignment = field.value_ptr.abi_align; - const field_name = CValue{ .identifier = field.key_ptr.* }; - try buffer.appendSlice(indent); - try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete); - try buffer.appendSlice(";\n"); - - empty = false; - } - if (empty) { - try buffer.appendSlice(indent); - try buffer.appendSlice("char empty_union;\n"); - } - } - - if (t.unionTagTypeSafety()) |_| try 
buffer.appendSlice(" } payload;\n"); - try buffer.appendSlice("};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderErrorUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - assert(t.errorUnionSet().tag() == .anyerror); - - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - const payload_ty = t.errorUnionPayload(); - const payload_name = CValue{ .identifier = "payload" }; - const error_ty = t.errorUnionSet(); - const error_name = CValue{ .identifier = "error" }; - - const target = dg.module.getTarget(); - const payload_align = payload_ty.abiAlignment(target); - const error_align = error_ty.abiAlignment(target); - try bw.writeAll("struct "); - try bw.writeAll(name); - try bw.writeAll(" {\n "); - if (error_align > payload_align) { - try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, error_ty, error_name, .Mut, 0, .Complete); - } else { - try dg.renderTypeAndName(bw, error_ty, error_name, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0, .Complete); - } - try bw.writeAll(";\n};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderArrayTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - const info = t.arrayInfo(); - std.debug.assert(info.sentinel == null); // expected canonical type - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - try bw.writeAll("typedef "); - try dg.renderType(bw, info.elem_type, .Complete); - - const name_begin = buffer.items.len + " ".len; - try bw.print(" zig_A_{}_{d}", .{ typeToCIdentifier(info.elem_type, dg.module), info.len }); - const name_end = buffer.items.len; - - const c_len = if (info.len > 0) info.len else 1; - var c_len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = c_len }; - const c_len_val = Value.initPayload(&c_len_pl.base); - try bw.print("[{}];\n", .{try dg.fmtIntLiteral(Type.usize, c_len_val)}); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderOptionalTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = 
std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - var opt_buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&opt_buf); - - try bw.writeAll("struct "); - try bw.writeAll(name); - try bw.writeAll(" {\n"); - try dg.renderTypeAndName(bw, child_ty, .{ .identifier = "payload" }, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, Type.bool, .{ .identifier = "is_null" }, .Mut, 0, .Complete); - try bw.writeAll(";\n};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderOpaqueTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - const opaque_ty = t.cast(Type.Payload.Opaque).?.data; - const unqualified_name = dg.module.declPtr(opaque_ty.owner_decl).name; - const fqn = try opaque_ty.getFullyQualifiedName(dg.module); - defer dg.typedefs.allocator.free(fqn); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.writer().print("typedef struct { } ", .{fmtIdent(std.mem.span(unqualified_name))}); - - const name_begin = buffer.items.len; - try buffer.writer().print("zig_O_{}", .{fmtIdent(fqn)}); - const name_end = buffer.items.len; - try buffer.appendSlice(";\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - fn indexToCType(dg: *DeclGen, idx: CType.Index) CType { return dg.ctypes.indexToCType(idx); } @@ -2408,31 +1951,27 @@ pub const DeclGen = struct { const idx = try dg.typeToIndex(ty); try w.print("{}", .{try dg.renderTypePrefix(w, idx, .suffix, CQualifiers.init(.{ .@"const" = switch (mutability) { - .Const, .ConstArgument => true, - .Mut => false, + .mut => false, + .@"const" => true, }, }))}); try dg.writeCValue(w, name); try dg.renderTypeSuffix(w, idx, .suffix); } - fn renderTagNameFn(dg: *DeclGen, enum_ty: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - + fn renderTagNameFn(dg: *DeclGen, w: anytype, fn_name: []const u8, enum_ty: Type) !void { const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - try buffer.appendSlice("static "); - try dg.renderType(bw, name_slice_ty, .Complete); - const name_begin = buffer.items.len + " ".len; - try bw.print(" zig_tagName_{}_{d}(", .{ typeToCIdentifier(enum_ty, dg.module), @enumToInt(enum_ty.getOwnerDecl()) }); - const name_end = buffer.items.len - "(".len; - try dg.renderTypeAndName(bw, enum_ty, .{ .identifier = "tag" }, .Const, 0, .Complete); - try buffer.appendSlice(") {\n switch (tag) {\n"); + try w.writeAll("static "); + try dg.renderType(w, name_slice_ty, .Complete); + try w.writeByte(' '); + try w.writeAll(fn_name); + try w.writeByte('('); + try dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, .@"const", 0, .Complete); + try w.writeAll(") {\n switch (tag) {\n"); for (enum_ty.enumFields().keys(), 0..) 
|name, index| { - const name_z = try dg.typedefs.allocator.dupeZ(u8, name); - defer dg.typedefs.allocator.free(name_z); + const name_z = try dg.gpa.dupeZ(u8, name); + defer dg.gpa.free(name_z); const name_bytes = name_z[0 .. name_z.len + 1]; var tag_pl: Value.Payload.U32 = .{ @@ -2453,40 +1992,23 @@ pub const DeclGen = struct { var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; const len_val = Value.initPayload(&len_pl.base); - try bw.print(" case {}: {{\n static ", .{try dg.fmtIntLiteral(enum_ty, int_val)}); - try dg.renderTypeAndName(bw, name_ty, .{ .identifier = "name" }, .Const, 0, .Complete); - try buffer.appendSlice(" = "); - try dg.renderValue(bw, name_ty, name_val, .Initializer); - try buffer.appendSlice(";\n return ("); - try dg.renderTypecast(bw, name_slice_ty); - try bw.print("){{{}, {}}};\n", .{ + try w.print(" case {}: {{\n static ", .{try dg.fmtIntLiteral(enum_ty, int_val)}); + try dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, .@"const", 0, .Complete); + try w.writeAll(" = "); + try dg.renderValue(w, name_ty, name_val, .Initializer); + try w.writeAll(";\n return ("); + try dg.renderTypecast(w, name_slice_ty); + try w.print("){{{}, {}}};\n", .{ fmtIdent("name"), try dg.fmtIntLiteral(Type.usize, len_val), }); - try buffer.appendSlice(" }\n"); + try w.writeAll(" }\n"); } - try buffer.appendSlice(" }\n while ("); - try dg.renderValue(bw, Type.bool, Value.true, .Other); - try buffer.appendSlice(") "); - _ = try airBreakpoint(bw); - try buffer.appendSlice("}\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try enum_ty.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn getTagNameFn(dg: *DeclGen, enum_ty: Type) ![]const u8 { - return dg.getTypedefName(enum_ty) orelse - try dg.renderTagNameFn(enum_ty); + try w.writeAll(" }\n while ("); + try dg.renderValue(w, Type.bool, Value.true, .Other); + try w.writeAll(") "); + _ = try airBreakpoint(w); + try w.writeAll("}\n"); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { @@ -2724,7 +2246,7 @@ pub fn genErrDecls(o: *Object) !void { const name_val = Value.initPayload(&name_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .Const, 0, .Complete); + try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .@"const", 0, .Complete); try writer.writeAll(" = "); try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer); try writer.writeAll(";\n"); @@ -2737,7 +2259,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = Type.initPayload(&name_array_ty_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .Const, 0, .Complete); + try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .@"const", 0, .Complete); try writer.writeAll(" = {"); for (o.dg.module.error_name_list.items, 0..) 
|name, value| { if (value != 0) try writer.writeByte(','); @@ -2767,6 +2289,17 @@ fn genExports(o: *Object) !void { }; } +pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { + const writer = o.writer(); + switch (lazy_fn.key_ptr.*) { + .tag_name => _ = try o.dg.renderTagNameFn( + writer, + lazy_fn.value_ptr.fn_name, + lazy_fn.value_ptr.data.tag_name, + ), + } +} + pub fn genFunc(f: *Function) !void { const tracy = trace(@src()); defer tracy.end(); @@ -2845,7 +2378,7 @@ pub fn genFunc(f: *Function) !void { w, local.ty, .{ .local = local_index }, - .Mut, + .mut, local.alignment, .Complete, ); @@ -2886,7 +2419,7 @@ pub fn genDecl(o: *Object) !void { try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); if (variable.is_threadlocal) try fwd_decl_writer.writeAll("zig_threadlocal "); - try o.dg.renderTypeAndName(fwd_decl_writer, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, o.dg.decl.ty, decl_c_value, .mut, o.dg.decl.@"align", .Complete); try fwd_decl_writer.writeAll(";\n"); try genExports(o); @@ -2896,7 +2429,7 @@ pub fn genDecl(o: *Object) !void { if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .mut, o.dg.decl.@"align", .Complete); if (o.dg.decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); @@ -2908,13 +2441,13 @@ pub fn genDecl(o: *Object) !void { const decl_c_value: CValue = .{ .decl = o.dg.decl_index }; try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .@"const", o.dg.decl.@"align", .Complete); try fwd_decl_writer.writeAll(";\n"); const w = o.writer(); if (!is_global) try w.writeAll("static "); if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .@"const", o.dg.decl.@"align", .Complete); if (o.dg.decl.@"linksection" != null) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); @@ -3443,7 +2976,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { return CValue{ .undef = inst_ty }; } - const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; + const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); const local = try f.allocAlignedLocal(elem_type, mutability, inst_ty.ptrAlignment(target)); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.local }); @@ -3460,7 +2993,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { return CValue{ .undef = inst_ty }; } - const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; + const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); const local = try f.allocAlignedLocal(elem_ty, mutability, inst_ty.ptrAlignment(target)); 
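// Illustrative note (exact names assumed, not taken from this patch): with the
// Mutability tags renamed from .Const/.Mut to .@"const"/.mut, a const local
// still renders as, e.g.,
//   uint32_t const t0;
// while an over-aligned .mut local renders as
//   zig_align(16) uint8_t t1;
// renderTypeAndName picks zig_align()/zig_under_align() by comparing the
// requested alignment against the type's ABI alignment.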
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.local }); @@ -4937,7 +4470,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { writer, output_ty, local_value, - .Mut, + .mut, alignment, .Complete, ); @@ -4976,7 +4509,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { writer, input_ty, local_value, - .Const, + .@"const", alignment, .Complete, ); @@ -6474,7 +6007,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.print(" = {s}(", .{try f.object.dg.getTagNameFn(enum_ty)}); + try writer.print(" = {s}(", .{try f.getTagNameFn(enum_ty)}); try f.writeCValue(writer, operand, .Other); try writer.writeAll(");\n"); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 601c15abee..ad482024b7 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -290,11 +290,11 @@ pub const CType = extern union { } }; - const Promoted = struct { + pub const Promoted = struct { arena: std.heap.ArenaAllocator, set: Set, - fn gpa(self: *Promoted) Allocator { + pub fn gpa(self: *Promoted) Allocator { return self.arena.child_allocator; } @@ -345,11 +345,11 @@ pub const CType = extern union { } }; - fn promote(self: Store, gpa: Allocator) Promoted { + pub fn promote(self: Store, gpa: Allocator) Promoted { return .{ .arena = self.arena.promote(gpa), .set = self.set }; } - fn demote(self: *Store, promoted: Promoted) void { + pub fn demote(self: *Store, promoted: Promoted) void { self.arena = promoted.arena.state; self.set = promoted.set; } @@ -382,17 +382,17 @@ pub const CType = extern union { _ = promoted.arena.reset(.retain_capacity); } - pub fn shrinkToFit(self: *Store, gpa: Allocator) void { - self.map.shrinkAndFree(gpa, self.map.entries.len); - } - - pub fn shrinkAndFree(self: *Store, gpa: Allocator) void { + pub fn clearAndFree(self: *Store, gpa: Allocator) void { var promoted = self.promote(gpa); defer self.demote(promoted); promoted.set.map.clearAndFree(gpa); _ = promoted.arena.reset(.free_all); } + pub fn shrinkToFit(self: *Store, gpa: Allocator) void { + self.set.map.shrinkAndFree(gpa, self.set.map.count()); + } + pub fn move(self: *Store) Store { const moved = self.*; self.* = .{}; @@ -1252,8 +1252,8 @@ pub const CType = extern union { pub const HashContext64 = struct { store: *const Store.Set, - pub fn hash(_: @This(), cty: CType) u64 { - return cty.hash(); + pub fn hash(self: @This(), cty: CType) u64 { + return cty.hash(self.store.*); } pub fn eql(_: @This(), lhs: CType, rhs: CType) bool { return lhs.eql(rhs); diff --git a/src/link/C.zig b/src/link/C.zig index 7fb23b2642..8eb6fe16af 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -22,26 +22,19 @@ base: link.File, /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function, stitching pre-rendered pieces of C code together. decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{}, -/// Stores Type/Value data for `typedefs` to reference. -/// Accumulates allocations and then there is a periodic garbage collection after flush(). -arena: std.heap.ArenaAllocator, /// Per-declaration data. const DeclBlock = struct { code: std.ArrayListUnmanaged(u8) = .{}, fwd_decl: std.ArrayListUnmanaged(u8) = .{}, + /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate + /// over each `Decl` and generate the definition for each used `CType` once. 
ctypes: codegen.CType.Store = .{}, - /// Each Decl stores a mapping of Zig Types to corresponding C types, for every - /// Zig Type used by the Decl. In flush(), we iterate over each Decl - /// and emit the typedef code for all types, making sure to not emit the same thing twice. - /// Any arena memory the Type points to lives in the `arena` field of `C`. - typedefs: codegen.TypedefMap.Unmanaged = .{}, + /// Key and Value storage use the ctype arena. + lazy_fns: codegen.LazyFnMap = .{}, fn deinit(db: *DeclBlock, gpa: Allocator) void { - for (db.typedefs.values()) |typedef| { - gpa.free(typedef.rendered); - } - db.typedefs.deinit(gpa); + db.lazy_fns.deinit(gpa); db.ctypes.deinit(gpa); db.fwd_decl.deinit(gpa); db.code.deinit(gpa); @@ -66,7 +59,6 @@ pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C errdefer gpa.destroy(c_file); c_file.* = C{ - .arena = std.heap.ArenaAllocator.init(gpa), .base = .{ .tag = .c, .options = options, @@ -85,8 +77,6 @@ pub fn deinit(self: *C) void { db.deinit(gpa); } self.decl_table.deinit(gpa); - - self.arena.deinit(); } pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { @@ -101,44 +91,42 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes const tracy = trace(@src()); defer tracy.end(); + const gpa = self.base.allocator; + const decl_index = func.owner_decl; - const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); + const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } - const fwd_decl = &gop.value_ptr.fwd_decl; const ctypes = &gop.value_ptr.ctypes; - const typedefs = &gop.value_ptr.typedefs; + const lazy_fns = &gop.value_ptr.lazy_fns; + const fwd_decl = &gop.value_ptr.fwd_decl; const code = &gop.value_ptr.code; + ctypes.clearRetainingCapacity(gpa); + lazy_fns.clearRetainingCapacity(); fwd_decl.shrinkRetainingCapacity(0); - ctypes.clearRetainingCapacity(module.gpa); - for (typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - typedefs.clearRetainingCapacity(); code.shrinkRetainingCapacity(0); var function: codegen.Function = .{ - .value_map = codegen.CValueMap.init(module.gpa), + .value_map = codegen.CValueMap.init(gpa), .air = air, .liveness = liveness, .func = func, .object = .{ .dg = .{ - .gpa = module.gpa, + .gpa = gpa, .module = module, .error_msg = null, .decl_index = decl_index, .decl = module.declPtr(decl_index), - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, - .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), - .typedefs_arena = self.arena.allocator(), }, - .code = code.toManaged(module.gpa), + .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }, - .arena = std.heap.ArenaAllocator.init(module.gpa), + .lazy_fns = lazy_fns.*, + .arena = std.heap.ArenaAllocator.init(gpa), }; function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() }; @@ -146,91 +134,79 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes codegen.genFunc(&function) catch |err| switch (err) { error.AnalysisFail => { - try module.failed_decls.put(module.gpa, decl_index, function.object.dg.error_msg.?); + try module.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?); return; }, else => |e| return e, }; - fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); ctypes.* = function.object.dg.ctypes.move(); - typedefs.* 
= function.object.dg.typedefs.unmanaged; - function.object.dg.typedefs.unmanaged = .{}; + lazy_fns.* = function.lazy_fns.move(); + fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); code.* = function.object.code.moveToUnmanaged(); // Free excess allocated memory for this Decl. - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); - code.shrinkAndFree(module.gpa, code.items.len); - ctypes.shrinkAndFree(module.gpa); + ctypes.shrinkToFit(gpa); + lazy_fns.shrinkAndFree(gpa, lazy_fns.count()); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); + code.shrinkAndFree(gpa, code.items.len); } pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { const tracy = trace(@src()); defer tracy.end(); - const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); + const gpa = self.base.allocator; + + const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } - const fwd_decl = &gop.value_ptr.fwd_decl; const ctypes = &gop.value_ptr.ctypes; - const typedefs = &gop.value_ptr.typedefs; + const fwd_decl = &gop.value_ptr.fwd_decl; const code = &gop.value_ptr.code; + ctypes.clearRetainingCapacity(gpa); fwd_decl.shrinkRetainingCapacity(0); - ctypes.clearRetainingCapacity(module.gpa); - for (typedefs.values()) |value| { - module.gpa.free(value.rendered); - } - typedefs.clearRetainingCapacity(); code.shrinkRetainingCapacity(0); const decl = module.declPtr(decl_index); var object: codegen.Object = .{ .dg = .{ - .gpa = module.gpa, + .gpa = gpa, .module = module, .error_msg = null, .decl_index = decl_index, .decl = decl, - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, - .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), - .typedefs_arena = self.arena.allocator(), }, - .code = code.toManaged(module.gpa), + .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { object.code.deinit(); - for (object.dg.typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - object.dg.typedefs.deinit(); object.dg.ctypes.deinit(object.dg.gpa); object.dg.fwd_decl.deinit(); } codegen.genDecl(&object) catch |err| switch (err) { error.AnalysisFail => { - try module.failed_decls.put(module.gpa, decl_index, object.dg.error_msg.?); + try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); return; }, else => |e| return e, }; + ctypes.* = object.dg.ctypes.move(); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); - ctypes.* = object.dg.ctypes; - object.dg.ctypes = .{}; - typedefs.* = object.dg.typedefs.unmanaged; - object.dg.typedefs.unmanaged = .{}; code.* = object.code.moveToUnmanaged(); // Free excess allocated memory for this Decl. - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); - code.shrinkAndFree(module.gpa, code.items.len); - ctypes.shrinkAndFree(module.gpa); + ctypes.shrinkToFit(gpa); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); + code.shrinkAndFree(gpa, code.items.len); } pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { @@ -260,7 +236,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) sub_prog_node.activate(); defer sub_prog_node.end(); - const gpa = comp.gpa; + const gpa = self.base.allocator; const module = self.base.options.module.?; // This code path happens exclusively with -ofmt=c. 
The flush logic for @@ -271,19 +247,17 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) const abi_define = abiDefine(comp); - // Covers defines, zig.h, typedef, and asm. - var buf_count: usize = 2; - if (abi_define != null) buf_count += 1; - try f.all_buffers.ensureUnusedCapacity(gpa, buf_count); + // Covers defines, zig.h, ctypes, asm. + try f.all_buffers.ensureUnusedCapacity(gpa, 4); if (abi_define) |buf| f.appendBufAssumeCapacity(buf); f.appendBufAssumeCapacity(zig_h); - const typedef_index = f.all_buffers.items.len; + const ctypes_index = f.all_buffers.items.len; f.all_buffers.items.len += 1; { - var asm_buf = f.asm_buf.toManaged(module.gpa); + var asm_buf = f.asm_buf.toManaged(gpa); defer asm_buf.deinit(); try codegen.genGlobalAsm(module, &asm_buf); @@ -294,7 +268,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) try self.flushErrDecls(&f); - // Typedefs, forward decls, and non-functions first. + // `CType`s, forward decls, and non-functions first. // Unlike other backends, the .c code we are emitting is order-dependent. Therefore // we must traverse the set of Decls that we are emitting according to their dependencies. // Our strategy is to populate a set of remaining decls, pop Decls one by one, @@ -321,11 +295,11 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) } } - f.all_buffers.items[typedef_index] = .{ - .iov_base = if (f.typedef_buf.items.len > 0) f.typedef_buf.items.ptr else "", - .iov_len = f.typedef_buf.items.len, + f.all_buffers.items[ctypes_index] = .{ + .iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "", + .iov_len = f.ctypes_buf.items.len, }; - f.file_size += f.typedef_buf.items.len; + f.file_size += f.ctypes_buf.items.len; // Now the code. try f.all_buffers.ensureUnusedCapacity(gpa, decl_values.len); @@ -338,31 +312,23 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) } const Flush = struct { - err_decls: DeclBlock = .{}, remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{}, - ctypes: CTypes = .{}, - typedefs: Typedefs = .{}, - typedef_buf: std.ArrayListUnmanaged(u8) = .{}, + ctypes: codegen.CType.Store = .{}, + ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{}, + ctypes_buf: std.ArrayListUnmanaged(u8) = .{}, + + err_decls: DeclBlock = .{}, + + lazy_fns: LazyFns = .{}, + asm_buf: std.ArrayListUnmanaged(u8) = .{}, /// We collect a list of buffers to write, and write them all at once with pwritev 😎 all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{}, /// Keeps track of the total bytes of `all_buffers`. 
file_size: u64 = 0, - const CTypes = std.ArrayHashMapUnmanaged( - codegen.CType, - void, - codegen.CType.HashContext32, - true, - ); - - const Typedefs = std.HashMapUnmanaged( - Type, - void, - Type.HashContext64, - std.hash_map.default_max_load_percentage, - ); + const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, DeclBlock); fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void { if (buf.len == 0) return; @@ -372,11 +338,14 @@ const Flush = struct { fn deinit(f: *Flush, gpa: Allocator) void { f.all_buffers.deinit(gpa); - f.typedef_buf.deinit(gpa); - f.typedefs.deinit(gpa); + var lazy_fns_it = f.lazy_fns.valueIterator(); + while (lazy_fns_it.next()) |db| db.deinit(gpa); + f.lazy_fns.deinit(gpa); + f.err_decls.deinit(gpa); + f.ctypes_buf.deinit(gpa); + f.ctypes_map.deinit(gpa); f.ctypes.deinit(gpa); f.remaining_decls.deinit(gpa); - f.err_decls.deinit(gpa); } }; @@ -384,56 +353,36 @@ const FlushDeclError = error{ OutOfMemory, }; -fn flushTypedefs(self: *C, f: *Flush, typedefs: codegen.TypedefMap.Unmanaged) FlushDeclError!void { - if (typedefs.count() == 0) return; - const gpa = self.base.allocator; - const module = self.base.options.module.?; - - try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, typedefs.count()), .{ - .mod = module, - }); - var it = typedefs.iterator(); - while (it.next()) |new| { - const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{ - .mod = module, - }); - if (!gop.found_existing) { - try f.typedef_buf.appendSlice(gpa, new.value_ptr.rendered); - } - } +fn flushCTypes(self: *C, f: *Flush, ctypes: codegen.CType.Store) FlushDeclError!void { + _ = self; + _ = f; + _ = ctypes; } fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { - const module = self.base.options.module.?; + const gpa = self.base.allocator; const fwd_decl = &f.err_decls.fwd_decl; const ctypes = &f.err_decls.ctypes; - const typedefs = &f.err_decls.typedefs; const code = &f.err_decls.code; var object = codegen.Object{ .dg = .{ - .gpa = module.gpa, - .module = module, + .gpa = gpa, + .module = self.base.options.module.?, .error_msg = null, .decl_index = undefined, .decl = undefined, - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, - .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), - .typedefs_arena = self.arena.allocator(), }, - .code = code.toManaged(module.gpa), + .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { object.code.deinit(); - object.dg.ctypes.deinit(module.gpa); - for (object.dg.typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - object.dg.typedefs.deinit(); + object.dg.ctypes.deinit(gpa); object.dg.fwd_decl.deinit(); } @@ -443,16 +392,75 @@ fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { }; fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); - typedefs.* = object.dg.typedefs.unmanaged; - object.dg.typedefs.unmanaged = .{}; + ctypes.* = object.dg.ctypes.move(); + code.* = object.code.moveToUnmanaged(); + + try self.flushCTypes(f, ctypes.*); + try f.all_buffers.ensureUnusedCapacity(gpa, 2); + f.appendBufAssumeCapacity(fwd_decl.items); + f.appendBufAssumeCapacity(code.items); +} + +fn flushLazyFn( + self: *C, + f: *Flush, + db: *DeclBlock, + lazy_fn: codegen.LazyFnMap.Entry, +) FlushDeclError!void { + const gpa = self.base.allocator; + + const fwd_decl = &db.fwd_decl; + const ctypes = &db.ctypes; + 
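    // Each lazy function is rendered into its own fresh DeclBlock, so its
    // forward declaration, CTypes, and body are flushed exactly once no
    // matter how many Decls reference the same LazyFnKey (deduplicated in
    // flushLazyFns via getOrPut below).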
const code = &db.code; + + var object = codegen.Object{ + .dg = .{ + .gpa = gpa, + .module = self.base.options.module.?, + .error_msg = null, + .decl_index = undefined, + .decl = undefined, + .fwd_decl = fwd_decl.toManaged(gpa), + .ctypes = ctypes.*, + }, + .code = code.toManaged(gpa), + .indent_writer = undefined, // set later so we can get a pointer to object.code + }; + object.indent_writer = .{ .underlying_writer = object.code.writer() }; + defer { + object.code.deinit(); + object.dg.ctypes.deinit(gpa); + object.dg.fwd_decl.deinit(); + } + + codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) { + error.AnalysisFail => unreachable, + else => |e| return e, + }; + + fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); + ctypes.* = object.dg.ctypes.move(); code.* = object.code.moveToUnmanaged(); - try self.flushTypedefs(f, typedefs.*); - try f.all_buffers.ensureUnusedCapacity(self.base.allocator, 1); + try self.flushCTypes(f, ctypes.*); + try f.all_buffers.ensureUnusedCapacity(gpa, 2); f.appendBufAssumeCapacity(fwd_decl.items); f.appendBufAssumeCapacity(code.items); } +fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void { + const gpa = self.base.allocator; + try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count())); + + var it = lazy_fns.iterator(); + while (it.next()) |entry| { + const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*); + if (gop.found_existing) continue; + gop.value_ptr.* = .{}; + try self.flushLazyFn(f, gop.value_ptr, entry); + } +} + /// Assumes `decl` was in the `remaining_decls` set, and has already been removed. fn flushDecl( self: *C, @@ -460,8 +468,8 @@ fn flushDecl( decl_index: Module.Decl.Index, export_names: std.StringHashMapUnmanaged(void), ) FlushDeclError!void { - const module = self.base.options.module.?; - const decl = module.declPtr(decl_index); + const gpa = self.base.allocator; + const decl = self.base.options.module.?.declPtr(decl_index); // Before flushing any particular Decl we must ensure its // dependencies are already flushed, so that the order in the .c // file comes out correctly. 
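For reference, the generated helper for a .tag_name key is an ordinary static C function: a switch over the enum tag that returns a pointer/length slice to a static, NUL-terminated name, followed by an unreachable trap loop. A rough sketch of its shape in the flushed output; the slice typedef, the mangled names, and zig_breakpoint() are illustrative stand-ins, not the exact spellings the backend produces:

typedef struct { uint8_t const *ptr; uintptr_t len; } zig_L_u8;
static zig_L_u8 zig_tagName_demo_Color_104(uint8_t const tag) {
 switch (tag) {
  case 0: {
   static uint8_t const name[4] = "red";
   return (zig_L_u8){name, 3};
  }
  case 1: {
   static uint8_t const name[5] = "blue";
   return (zig_L_u8){name, 4};
  }
 }
 while (1) zig_breakpoint();
}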
@@ -472,10 +480,10 @@ fn flushDecl( } const decl_block = self.decl_table.getPtr(decl_index).?; - const gpa = self.base.allocator; - try self.flushTypedefs(f, decl_block.typedefs); - try f.all_buffers.ensureUnusedCapacity(gpa, 2); + try self.flushCTypes(f, decl_block.ctypes); + try self.flushLazyFns(f, decl_block.lazy_fns); + try f.all_buffers.ensureUnusedCapacity(gpa, 1); if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } -- cgit v1.2.3 From 064b355912dd85bd06ee87101066ed0db2783796 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 20 Feb 2023 20:50:19 -0500 Subject: CBE: use CType for type definitions --- src/Compilation.zig | 2 +- src/codegen/c.zig | 1219 ++++++++++++++++++++++++++++-------------------- src/codegen/c/type.zig | 972 +++++++++++++++++++++++++++----------- src/link/C.zig | 187 ++++++-- 4 files changed, 1568 insertions(+), 812 deletions(-) (limited to 'src/codegen') diff --git a/src/Compilation.zig b/src/Compilation.zig index 9359d24dc3..aea1876747 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3273,7 +3273,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void { .gpa = gpa, .module = module, .error_msg = null, - .decl_index = decl_index, + .decl_index = decl_index.toOptional(), .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = .{}, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cf4c7ec21b..35c826e2d1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -31,6 +31,7 @@ pub const CType = @import("c/type.zig").CType; pub const CValue = union(enum) { none: void, + new_local: LocalIndex, local: LocalIndex, /// Address of a local. local_ref: LocalIndex, @@ -38,6 +39,8 @@ pub const CValue = union(enum) { constant: Air.Inst.Ref, /// Index into the parameters arg: usize, + /// The payload field of a parameter + arg_array: usize, /// Index into a tuple's fields field: usize, /// By-value @@ -298,7 +301,7 @@ pub const Function = struct { const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); const gpa = f.object.dg.gpa; - try f.allocs.put(gpa, decl_c_value.local, true); + try f.allocs.put(gpa, decl_c_value.new_local, true); try writer.writeAll("static "); try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .@"const", alignment, .Complete); try writer.writeAll(" = "); @@ -330,12 +333,12 @@ pub const Function = struct { .alignment = alignment, .loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1), }); - return CValue{ .local = @intCast(LocalIndex, f.locals.items.len - 1) }; + return CValue{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { const result = try f.allocAlignedLocal(ty, .mut, 0); - log.debug("%{d}: allocating t{d}", .{ inst, result.local }); + log.debug("%{d}: allocating t{d}", .{ inst, result.new_local }); return result; } @@ -349,7 +352,7 @@ pub const Function = struct { if (local.alignment >= alignment) { local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1); _ = locals_list.swapRemove(i); - return CValue{ .local = local_index }; + return CValue{ .new_local = local_index }; } } } @@ -488,8 +491,8 @@ pub const Object = struct { pub const DeclGen = struct { gpa: std.mem.Allocator, module: *Module, - decl: *Decl, - decl_index: Decl.Index, + decl: ?*Decl, + decl_index: Decl.OptionalIndex, fwd_decl: std.ArrayList(u8), error_msg: ?*Module.ErrorMsg, ctypes: CType.Store, @@ -497,7 +500,7 @@ pub const DeclGen = 
struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(dg.decl); + const src_loc = src.toSrcLoc(dg.decl.?); dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } @@ -816,7 +819,7 @@ pub const DeclGen = struct { empty = false; } - if (empty) try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)}); + return writer.writeByte('}'); }, .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}), @@ -1287,7 +1290,6 @@ pub const DeclGen = struct { empty = false; } - if (empty) try writer.print("{}", .{try dg.fmtIntLiteral(Type.u8, Value.zero)}); try writer.writeByte('}'); }, .Packed => { @@ -1304,7 +1306,7 @@ pub const DeclGen = struct { const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); var eff_num_fields: usize = 0; - for (field_vals, 0..) |_, index| { + for (0..field_vals.len) |index| { const field_ty = ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; @@ -1408,6 +1410,7 @@ pub const DeclGen = struct { return; } + var has_payload_init = false; try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { const layout = ty.unionGetLayout(target); @@ -1416,7 +1419,10 @@ pub const DeclGen = struct { try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); try writer.writeAll(", "); } - try writer.writeAll(".payload = {"); + if (!ty.unionHasAllZeroBitFieldTypes()) { + try writer.writeAll(".payload = {"); + has_payload_init = true; + } } var it = ty.unionFields().iterator(); @@ -1428,8 +1434,8 @@ pub const DeclGen = struct { try writer.print(".{ } = ", .{fmtIdent(field.key_ptr.*)}); try dg.renderValue(writer, field.value_ptr.ty, Value.undef, initializer_type); break; - } else try writer.writeAll(".empty_union = 0"); - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + } + if (has_payload_init) try writer.writeByte('}'); try writer.writeByte('}'); }, @@ -1452,337 +1458,61 @@ pub const DeclGen = struct { } fn renderFunctionSignature(dg: *DeclGen, w: anytype, kind: TypedefKind, export_index: u32) !void { - const fn_info = dg.decl.ty.fnInfo(); + const store = &dg.ctypes.set; + const module = dg.module; + + const fn_ty = dg.decl.?.ty; + const fn_cty_idx = try dg.typeToIndex(fn_ty, switch (kind) { + .Forward => .forward, + .Complete => .complete, + }); + + const fn_info = fn_ty.fnInfo(); if (fn_info.cc == .Naked) { switch (kind) { .Forward => try w.writeAll("zig_naked_decl "), .Complete => try w.writeAll("zig_naked "), } } - if (dg.decl.val.castTag(.function)) |func_payload| + if (dg.decl.?.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - const target = dg.module.getTarget(); - var ret_buf: LowerFnRetTyBuffer = undefined; - const ret_ty = lowerFnRetTy(fn_info.return_type, &ret_buf, target); - - try dg.renderType(w, ret_ty, kind); - try w.writeByte(' '); + const trailing = try renderTypePrefix( + dg.decl_index, + store.*, + module, + w, + fn_cty_idx, + .suffix, + CQualifiers.init(.{}), + ); + try w.print("{}", .{trailing}); if (toCallingConvention(fn_info.cc)) |call_conv| { try w.print("zig_callconv({s}) ", .{call_conv}); } - if (fn_info.alignment > 0 and kind == .Complete) try w.print(" zig_align_fn({})", .{fn_info.alignment}); + if (fn_info.alignment > 0 and kind == .Complete) { + try w.print(" zig_align_fn({})", .{fn_info.alignment}); + } - try 
dg.renderDeclName(w, dg.decl_index, export_index); - try w.writeByte('('); + try dg.renderDeclName(w, dg.decl_index.unwrap().?, export_index); - var index: usize = 0; - for (fn_info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - if (index > 0) try w.writeAll(", "); - const name = CValue{ .arg = index }; - try dg.renderTypeAndName(w, param_type, name, .@"const", 0, kind); - index += 1; - } + try renderTypeSuffix(dg.decl_index, store.*, module, w, fn_cty_idx, .suffix); - if (fn_info.is_var_args) { - if (index > 0) try w.writeAll(", "); - try w.writeAll("..."); - } else if (index == 0) { - try dg.renderType(w, Type.void, kind); + if (fn_info.alignment > 0 and kind == .Forward) { + try w.print(" zig_align_fn({})", .{fn_info.alignment}); } - try w.writeByte(')'); - if (fn_info.alignment > 0 and kind == .Forward) try w.print(" zig_align_fn({})", .{fn_info.alignment}); } fn indexToCType(dg: *DeclGen, idx: CType.Index) CType { return dg.ctypes.indexToCType(idx); } - fn typeToCType(dg: *DeclGen, ty: Type) !CType { - return dg.ctypes.typeToCType(dg.gpa, ty, dg.module); - } - fn typeToIndex(dg: *DeclGen, ty: Type) !CType.Index { - return dg.ctypes.typeToIndex(dg.gpa, ty, dg.module); - } - - const CTypeFix = enum { prefix, suffix }; - const CQualifiers = std.enums.EnumSet(enum { @"const", @"volatile", restrict }); - const CTypeRenderTrailing = enum { - no_space, - maybe_space, - - pub fn format( - self: @This(), - comptime fmt: []const u8, - _: std.fmt.FormatOptions, - w: anytype, - ) @TypeOf(w).Error!void { - if (fmt.len != 0) - @compileError("invalid format string '" ++ fmt ++ "' for type '" ++ - @typeName(@This()) ++ "'"); - comptime assert(fmt.len == 0); - switch (self) { - .no_space => {}, - .maybe_space => try w.writeByte(' '), - } - } - }; - fn renderTypePrefix( - dg: *DeclGen, - w: anytype, - idx: CType.Index, - parent_fix: CTypeFix, - qualifiers: CQualifiers, - ) @TypeOf(w).Error!CTypeRenderTrailing { - var trailing = CTypeRenderTrailing.maybe_space; - - const cty = dg.indexToCType(idx); - switch (cty.tag()) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - => |tag| try w.writeAll(@tagName(tag)), - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => |tag| { - const child_idx = cty.cast(CType.Payload.Child).?.data; - try w.print("{}*", .{try dg.renderTypePrefix(w, child_idx, .prefix, CQualifiers.init(.{ - .@"const" = switch (tag) { - .pointer, .pointer_volatile => false, - .pointer_const, .pointer_const_volatile => true, - else => unreachable, - }, - .@"volatile" = switch (tag) { - .pointer, .pointer_const => false, - .pointer_volatile, .pointer_const_volatile => true, - else => unreachable, - }, - }))}); - trailing = .no_space; - }, - - .array, - .vector, - => { - const child_idx = cty.cast(CType.Payload.Sequence).?.data.elem_type; - const child_trailing = try dg.renderTypePrefix(w, child_idx, .suffix, qualifiers); - switch (parent_fix) { - .prefix => { - try w.print("{}(", .{child_trailing}); - return .no_space; - }, - .suffix => return child_trailing, - } - }, - - .fwd_struct, - 
.fwd_union, - .anon_struct, - .packed_anon_struct, - => |tag| try w.print("{s} {}__{d}", .{ - switch (tag) { - .fwd_struct, - .anon_struct, - .packed_anon_struct, - => "struct", - .fwd_union => "union", - else => unreachable, - }, - fmtIdent(switch (tag) { - .fwd_struct, - .fwd_union, - => mem.span(dg.module.declPtr(cty.cast(CType.Payload.FwdDecl).?.data).name), - .anon_struct, - .packed_anon_struct, - => "anon", - else => unreachable, - }), - idx, - }), - - .@"struct", - .packed_struct, - .@"union", - .packed_union, - => return dg.renderTypePrefix( - w, - cty.cast(CType.Payload.Aggregate).?.data.fwd_decl, - parent_fix, - qualifiers, - ), - - .function, - .varargs_function, - => { - const child_trailing = try dg.renderTypePrefix( - w, - cty.cast(CType.Payload.Function).?.data.return_type, - .suffix, - CQualifiers.initEmpty(), - ); - switch (parent_fix) { - .prefix => { - try w.print("{}(", .{child_trailing}); - return .no_space; - }, - .suffix => return child_trailing, - } - }, - } - - var qualifier_it = qualifiers.iterator(); - while (qualifier_it.next()) |qualifier| { - try w.print("{}{s}", .{ trailing, @tagName(qualifier) }); - trailing = .maybe_space; - } - - return trailing; + fn typeToIndex(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType.Index { + return dg.ctypes.typeToIndex(dg.gpa, ty, dg.module, kind); } - fn renderTypeSuffix( - dg: *DeclGen, - w: anytype, - idx: CType.Index, - parent_fix: CTypeFix, - ) @TypeOf(w).Error!void { - const cty = dg.indexToCType(idx); - switch (cty.tag()) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - => {}, - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => try dg.renderTypeSuffix(w, cty.cast(CType.Payload.Child).?.data, .prefix), - - .array, - .vector, - => { - switch (parent_fix) { - .prefix => try w.writeByte(')'), - .suffix => {}, - } - - try w.print("[{}]", .{cty.cast(CType.Payload.Sequence).?.data.len}); - try dg.renderTypeSuffix(w, cty.cast(CType.Payload.Sequence).?.data.elem_type, .suffix); - }, - - .fwd_struct, - .fwd_union, - .anon_struct, - .packed_anon_struct, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => {}, - - .function, - .varargs_function, - => |tag| { - switch (parent_fix) { - .prefix => try w.writeByte(')'), - .suffix => {}, - } - - const data = cty.cast(CType.Payload.Function).?.data; - - try w.writeByte('('); - var need_comma = false; - for (data.param_types) |param_type| { - if (need_comma) try w.writeAll(", "); - need_comma = true; - _ = try dg.renderTypePrefix(w, param_type, .suffix, CQualifiers.initEmpty()); - try dg.renderTypeSuffix(w, param_type, .suffix); - } - switch (tag) { - .function => {}, - .varargs_function => { - if (need_comma) try w.writeAll(", "); - need_comma = true; - try w.writeAll("..."); - }, - else => unreachable, - } - if (!need_comma) try w.writeAll("void"); - try w.writeByte(')'); - - try dg.renderTypeSuffix(w, data.return_type, .suffix); - }, - } + fn typeToCType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType { + return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind); } /// Renders a type as a single 
identifier, generating intermediate typedefs @@ -1803,9 +1533,19 @@ pub const DeclGen = struct { t: Type, _: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { - const idx = try dg.typeToIndex(t); - _ = try dg.renderTypePrefix(w, idx, .suffix, CQualifiers.initEmpty()); - try dg.renderTypeSuffix(w, idx, .suffix); + const store = &dg.ctypes.set; + const module = dg.module; + const idx = try dg.typeToIndex(t, .complete); + _ = try renderTypePrefix( + dg.decl_index, + store.*, + module, + w, + idx, + .suffix, + CQualifiers.init(.{}), + ); + try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix); } const IntCastContext = union(enum) { @@ -1939,24 +1679,28 @@ pub const DeclGen = struct { alignment: u32, _: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { - if (alignment != 0) { - const abi_alignment = ty.abiAlignment(dg.module.getTarget()); - if (alignment < abi_alignment) { - try w.print("zig_under_align({}) ", .{alignment}); - } else if (alignment > abi_alignment) { - try w.print("zig_align({}) ", .{alignment}); - } - } + const store = &dg.ctypes.set; + const module = dg.module; - const idx = try dg.typeToIndex(ty); - try w.print("{}", .{try dg.renderTypePrefix(w, idx, .suffix, CQualifiers.init(.{ - .@"const" = switch (mutability) { - .mut => false, - .@"const" => true, - }, - }))}); + if (alignment != 0) switch (std.math.order(alignment, ty.abiAlignment(dg.module.getTarget()))) { + .lt => try w.print("zig_under_align({}) ", .{alignment}), + .eq => {}, + .gt => try w.print("zig_align({}) ", .{alignment}), + }; + + const idx = try dg.typeToIndex(ty, .complete); + const trailing = try renderTypePrefix( + dg.decl_index, + store.*, + module, + w, + idx, + .suffix, + CQualifiers.init(.{ .@"const" = mutability == .@"const" }), + ); + try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try dg.renderTypeSuffix(w, idx, .suffix); + try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix); } fn renderTagNameFn(dg: *DeclGen, w: anytype, fn_name: []const u8, enum_ty: Type) !void { @@ -2029,10 +1773,11 @@ pub const DeclGen = struct { fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .none => unreachable, - .local => |i| return w.print("t{d}", .{i}), + .local, .new_local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), .constant => unreachable, .arg => |i| return w.print("a{d}", .{i}), + .arg_array => |i| return dg.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }), .field => |i| return w.print("f{d}", .{i}), .decl => |decl| return dg.renderDeclName(w, decl, 0), .decl_ref => |decl| { @@ -2048,10 +1793,15 @@ pub const DeclGen = struct { fn writeCValueDeref(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .none => unreachable, - .local => |i| return w.print("(*t{d})", .{i}), + .local, .new_local => |i| return w.print("(*t{d})", .{i}), .local_ref => |i| return w.print("t{d}", .{i}), .constant => unreachable, .arg => |i| return w.print("(*a{d})", .{i}), + .arg_array => |i| { + try w.writeAll("(*"); + try dg.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }); + return w.writeByte(')'); + }, .field => |i| return w.print("f{d}", .{i}), .decl => |decl| { try w.writeAll("(*"); @@ -2078,7 +1828,7 @@ pub const DeclGen = struct { fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .none, .constant, .field, .undef => unreachable, - .local, .arg, .decl, .identifier, .bytes => { + 
.new_local, .local, .arg, .arg_array, .decl, .identifier, .bytes => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, @@ -2205,10 +1955,491 @@ pub const DeclGen = struct { } }; -pub fn genGlobalAsm(mod: *Module, code: *std.ArrayList(u8)) !void { +const CTypeFix = enum { prefix, suffix }; +const CQualifiers = std.enums.EnumSet(enum { @"const", @"volatile", restrict }); +const CTypeRenderTrailing = enum { + no_space, + maybe_space, + + pub fn format( + self: @This(), + comptime fmt: []const u8, + _: std.fmt.FormatOptions, + w: anytype, + ) @TypeOf(w).Error!void { + if (fmt.len != 0) + @compileError("invalid format string '" ++ fmt ++ "' for type '" ++ + @typeName(@This()) ++ "'"); + comptime assert(fmt.len == 0); + switch (self) { + .no_space => {}, + .maybe_space => try w.writeByte(' '), + } + } +}; +fn renderTypeName( + mod: *Module, + w: anytype, + idx: CType.Index, + cty: CType, + attributes: []const u8, +) !void { + switch (cty.tag()) { + else => unreachable, + + .fwd_anon_struct, + .fwd_anon_union, + => |tag| try w.print("{s} {s}anon__lazy_{d}", .{ + @tagName(tag)["fwd_anon_".len..], + attributes, + idx, + }), + + .fwd_struct, + .fwd_union, + => |tag| { + const owner_decl = cty.cast(CType.Payload.FwdDecl).?.data; + try w.print("{s} {s}{}__{d}", .{ + @tagName(tag)["fwd_".len..], + attributes, + fmtIdent(mem.span(mod.declPtr(owner_decl).name)), + @enumToInt(owner_decl), + }); + }, + } +} +fn renderTypePrefix( + decl: Decl.OptionalIndex, + store: CType.Store.Set, + mod: *Module, + w: anytype, + idx: CType.Index, + parent_fix: CTypeFix, + qualifiers: CQualifiers, +) @TypeOf(w).Error!CTypeRenderTrailing { + var trailing = CTypeRenderTrailing.maybe_space; + + const cty = store.indexToCType(idx); + switch (cty.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => |tag| try w.writeAll(@tagName(tag)), + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => |tag| { + const child_idx = cty.cast(CType.Payload.Child).?.data; + const child_trailing = try renderTypePrefix( + decl, + store, + mod, + w, + child_idx, + .prefix, + CQualifiers.init(.{ .@"const" = switch (tag) { + .pointer, .pointer_volatile => false, + .pointer_const, .pointer_const_volatile => true, + else => unreachable, + }, .@"volatile" = switch (tag) { + .pointer, .pointer_const => false, + .pointer_volatile, .pointer_const_volatile => true, + else => unreachable, + } }), + ); + try w.print("{}*", .{child_trailing}); + trailing = .no_space; + }, + + .array, + .vector, + => { + const child_idx = cty.cast(CType.Payload.Sequence).?.data.elem_type; + const child_trailing = try renderTypePrefix( + decl, + store, + mod, + w, + child_idx, + .suffix, + qualifiers, + ); + switch (parent_fix) { + .prefix => { + try w.print("{}(", .{child_trailing}); + return .no_space; + }, + .suffix => return child_trailing, + } + }, + + .fwd_anon_struct, + .fwd_anon_union, + => if (decl.unwrap()) |decl_index| + try w.print("anon__{d}_{d}", .{ @enumToInt(decl_index), idx }) + else + try renderTypeName(mod, w, idx, cty, ""), + + .fwd_struct, + .fwd_union, + => try 
renderTypeName(mod, w, idx, cty, ""), + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => |tag| { + try w.print("{s} {s}", .{ + @tagName(tag)["unnamed_".len..], + if (cty.isPacked()) "zig_packed(" else "", + }); + try renderAggregateFields(mod, w, store, cty, 1); + if (cty.isPacked()) try w.writeByte(')'); + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => return renderTypePrefix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Aggregate).?.data.fwd_decl, + parent_fix, + qualifiers, + ), + + .function, + .varargs_function, + => { + const child_trailing = try renderTypePrefix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Function).?.data.return_type, + .suffix, + CQualifiers.init(.{}), + ); + switch (parent_fix) { + .prefix => { + try w.print("{}(", .{child_trailing}); + return .no_space; + }, + .suffix => return child_trailing, + } + }, + } + + var qualifier_it = qualifiers.iterator(); + while (qualifier_it.next()) |qualifier| { + try w.print("{}{s}", .{ trailing, @tagName(qualifier) }); + trailing = .maybe_space; + } + + return trailing; +} +fn renderTypeSuffix( + decl: Decl.OptionalIndex, + store: CType.Store.Set, + mod: *Module, + w: anytype, + idx: CType.Index, + parent_fix: CTypeFix, +) @TypeOf(w).Error!void { + const cty = store.indexToCType(idx); + switch (cty.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + => {}, + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => try renderTypeSuffix(decl, store, mod, w, cty.cast(CType.Payload.Child).?.data, .prefix), + + .array, + .vector, + => { + switch (parent_fix) { + .prefix => try w.writeByte(')'), + .suffix => {}, + } + + try w.print("[{}]", .{cty.cast(CType.Payload.Sequence).?.data.len}); + try renderTypeSuffix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Sequence).?.data.elem_type, + .suffix, + ); + }, + + .fwd_anon_struct, + .fwd_anon_union, + .fwd_struct, + .fwd_union, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => {}, + + .function, + .varargs_function, + => |tag| { + switch (parent_fix) { + .prefix => try w.writeByte(')'), + .suffix => {}, + } + + const data = cty.cast(CType.Payload.Function).?.data; + + try w.writeByte('('); + var need_comma = false; + for (data.param_types, 0..) 
|param_type, param_i| { + if (need_comma) try w.writeAll(", "); + need_comma = true; + const trailing = try renderTypePrefix( + decl, + store, + mod, + w, + param_type, + .suffix, + CQualifiers.init(.{}), + ); + try w.print("{}a{d}", .{ trailing, param_i }); + try renderTypeSuffix(decl, store, mod, w, param_type, .suffix); + } + switch (tag) { + .function => {}, + .varargs_function => { + if (need_comma) try w.writeAll(", "); + need_comma = true; + try w.writeAll("..."); + }, + else => unreachable, + } + if (!need_comma) try w.writeAll("void"); + try w.writeByte(')'); + + try renderTypeSuffix(decl, store, mod, w, data.return_type, .suffix); + }, + } +} +fn renderAggregateFields( + mod: *Module, + writer: anytype, + store: CType.Store.Set, + cty: CType, + indent: usize, +) !void { + try writer.writeAll("{\n"); + const fields = cty.fields(); + for (fields) |field| { + try writer.writeByteNTimes(' ', indent + 1); + switch (std.math.order(field.alignas.@"align", field.alignas.abi)) { + .lt => try writer.print("zig_under_align({}) ", .{field.alignas.getAlign()}), + .eq => {}, + .gt => try writer.print("zig_align({}) ", .{field.alignas.getAlign()}), + } + const trailing = try renderTypePrefix( + .none, + store, + mod, + writer, + field.type, + .suffix, + CQualifiers.init(.{}), + ); + try writer.print("{}{ }", .{ trailing, fmtIdent(mem.span(field.name)) }); + try renderTypeSuffix(.none, store, mod, writer, field.type, .suffix); + try writer.writeAll(";\n"); + } + try writer.writeByteNTimes(' ', indent); + try writer.writeByte('}'); +} + +pub fn genTypeDecl( + mod: *Module, + writer: anytype, + global_store: CType.Store.Set, + global_idx: CType.Index, + decl: Decl.OptionalIndex, + decl_store: CType.Store.Set, + decl_idx: CType.Index, + found_existing: bool, +) !void { + const global_cty = global_store.indexToCType(global_idx); + switch (global_cty.tag()) { + .fwd_anon_struct => if (decl != .none) { + try writer.writeAll("typedef "); + _ = try renderTypePrefix( + .none, + global_store, + mod, + writer, + global_idx, + .suffix, + CQualifiers.init(.{}), + ); + try writer.writeByte(' '); + _ = try renderTypePrefix( + decl, + decl_store, + mod, + writer, + decl_idx, + .suffix, + CQualifiers.init(.{}), + ); + try writer.writeAll(";\n"); + }, + + .fwd_struct, + .fwd_union, + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => |tag| if (!found_existing) { + switch (tag) { + .fwd_struct, + .fwd_union, + => { + const owner_decl = global_cty.cast(CType.Payload.FwdDecl).?.data; + _ = try renderTypePrefix( + .none, + global_store, + mod, + writer, + global_idx, + .suffix, + CQualifiers.init(.{}), + ); + try writer.writeAll("; // "); + try mod.declPtr(owner_decl).renderFullyQualifiedName(mod, writer); + try writer.writeByte('\n'); + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const fwd_idx = global_cty.cast(CType.Payload.Aggregate).?.data.fwd_decl; + try renderTypeName( + mod, + writer, + fwd_idx, + global_store.indexToCType(fwd_idx), + if (global_cty.isPacked()) "zig_packed(" else "", + ); + try writer.writeByte(' '); + try renderAggregateFields(mod, writer, global_store, global_cty, 0); + if (global_cty.isPacked()) try writer.writeByte(')'); + try writer.writeAll(";\n"); + }, + + else => unreachable, + } + }, + + else => {}, + } +} + +pub fn genGlobalAsm(mod: *Module, writer: anytype) !void { var it = mod.global_assembly.valueIterator(); while (it.next()) |asm_source| { - try 
code.writer().print("__asm({s});\n", .{fmtStringLiteral(asm_source.*)}); + try writer.print("__asm({s});\n", .{fmtStringLiteral(asm_source.*)}); } } @@ -2279,14 +2510,16 @@ fn genExports(o: *Object) !void { defer tracy.end(); const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..], 0..) |@"export", i| { - try fwd_decl_writer.writeAll("zig_export("); - try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, 1 + i)); - try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(exports.items[0].options.name), - fmtStringLiteral(@"export".options.name), - }); - }; + if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { + for (exports.items[1..], 1..) |@"export", i| { + try fwd_decl_writer.writeAll("zig_export("); + try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, i)); + try fwd_decl_writer.print(", {s}, {s});\n", .{ + fmtStringLiteral(exports.items[0].options.name), + fmtStringLiteral(@"export".options.name), + }); + } + } } pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { @@ -2307,8 +2540,8 @@ pub fn genFunc(f: *Function) !void { const o = &f.object; const gpa = o.dg.gpa; const tv: TypedValue = .{ - .ty = o.dg.decl.ty, - .val = o.dg.decl.val, + .ty = o.dg.decl.?.ty, + .val = o.dg.decl.?.val, }; o.code_header = std.ArrayList(u8).init(gpa); @@ -2347,9 +2580,8 @@ pub fn genFunc(f: *Function) !void { // missing. These are added now to complete the map. Then we can sort by // alignment, descending. const free_locals = f.getFreeLocals(); - const values = f.allocs.values(); - for (f.allocs.keys(), 0..) |local_index, i| { - if (values[i]) continue; // static + for (f.allocs.keys(), f.allocs.values()) |local_index, value| { + if (value) continue; // static const local = f.locals.items[local_index]; log.debug("inserting local {d} into free_locals", .{local_index}); const gop = try free_locals.getOrPutContext(gpa, local.ty, f.tyHashCtx()); @@ -2398,10 +2630,10 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); - const tv: TypedValue = .{ - .ty = o.dg.decl.ty, - .val = o.dg.decl.val, - }; + const decl = o.dg.decl.?; + const decl_c_value: CValue = .{ .decl = o.dg.decl_index.unwrap().? 
}; + const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; + if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; if (tv.val.tag() == .extern_fn) { const fwd_decl_writer = o.dg.fwd_decl.writer(); @@ -2415,11 +2647,9 @@ pub fn genDecl(o: *Object) !void { const is_global = o.dg.declIsGlobal(tv) or variable.is_extern; const fwd_decl_writer = o.dg.fwd_decl.writer(); - const decl_c_value = CValue{ .decl = o.dg.decl_index }; - try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); if (variable.is_threadlocal) try fwd_decl_writer.writeAll("zig_threadlocal "); - try o.dg.renderTypeAndName(fwd_decl_writer, o.dg.decl.ty, decl_c_value, .mut, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, decl.ty, decl_c_value, .mut, decl.@"align", .Complete); try fwd_decl_writer.writeAll(";\n"); try genExports(o); @@ -2428,27 +2658,26 @@ pub fn genDecl(o: *Object) !void { const w = o.writer(); if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); - if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .mut, o.dg.decl.@"align", .Complete); - if (o.dg.decl.@"linksection" != null) try w.writeAll(", read, write)"); + if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .mut, decl.@"align", .Complete); + if (decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { - const is_global = o.dg.module.decl_exports.contains(o.dg.decl_index); + const is_global = o.dg.module.decl_exports.contains(decl_c_value.decl); const fwd_decl_writer = o.dg.fwd_decl.writer(); - const decl_c_value: CValue = .{ .decl = o.dg.decl_index }; try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .@"const", o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .@"const", decl.@"align", .Complete); try fwd_decl_writer.writeAll(";\n"); const w = o.writer(); if (!is_global) try w.writeAll("static "); - if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .@"const", o.dg.decl.@"align", .Complete); - if (o.dg.decl.@"linksection" != null) try w.writeAll(", read)"); + if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .@"const", decl.@"align", .Complete); + if (decl.@"linksection" != null) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); try w.writeAll(";\n"); @@ -2460,8 +2689,8 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { defer tracy.end(); const tv: TypedValue = .{ - .ty = dg.decl.ty, - .val = dg.decl.val, + .ty = dg.decl.?.ty, + .val = dg.decl.?.val, }; const writer = dg.fwd_decl.writer(); @@ -2499,7 +2728,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, // zig fmt: off .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies - .arg => airArg(f), + .arg => try airArg(f, inst), .breakpoint => try 
airBreakpoint(f.object.writer()), .ret_addr => try airRetAddr(f, inst), @@ -2748,13 +2977,14 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .c_va_start => return f.fail("TODO implement c_va_start", .{}), // zig fmt: on }; - if (result_value == .local) { - log.debug("map %{d} to t{d}", .{ inst, result_value.local }); - } - switch (result_value) { - .none => {}, - else => try f.value_map.putNoClobber(Air.indexToRef(inst), result_value), + if (result_value == .new_local) { + log.debug("map %{d} to t{d}", .{ inst, result_value.new_local }); } + try f.value_map.putNoClobber(Air.indexToRef(inst), switch (result_value) { + .none => continue, + .new_local => |i| .{ .local = i }, + else => result_value, + }); } } @@ -2979,10 +3209,10 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); const local = try f.allocAlignedLocal(elem_type, mutability, inst_ty.ptrAlignment(target)); - log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.local }); + log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; - try f.allocs.put(gpa, local.local, false); - return CValue{ .local_ref = local.local }; + try f.allocs.put(gpa, local.new_local, false); + return CValue{ .local_ref = local.new_local }; } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -2996,16 +3226,22 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); const local = try f.allocAlignedLocal(elem_ty, mutability, inst_ty.ptrAlignment(target)); - log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.local }); + log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; - try f.allocs.put(gpa, local.local, false); - return CValue{ .local_ref = local.local }; + try f.allocs.put(gpa, local.new_local, false); + return CValue{ .local_ref = local.new_local }; } -fn airArg(f: *Function) CValue { +fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { + const inst_ty = f.air.typeOfIndex(inst); + const inst_cty = try f.object.dg.typeToIndex(inst_ty, .parameter); + const i = f.next_arg_index; f.next_arg_index += 1; - return .{ .arg = i }; + return if (inst_cty != try f.object.dg.typeToIndex(inst_ty, .complete)) + .{ .arg_array = i } + else + .{ .arg = i }; } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3115,7 +3351,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const ret_val = if (is_array) ret_val: { const array_local = try f.allocLocal(inst, try lowered_ret_ty.copy(f.arena.allocator())); try writer.writeAll("memcpy("); - try f.writeCValueMember(writer, array_local, .{ .field = 0 }); + try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); try writer.writeAll(", "); if (deref) try f.writeCValueDeref(writer, operand) @@ -3135,14 +3371,13 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { try f.writeCValue(writer, ret_val, .Other); try writer.writeAll(";\n"); if (is_array) { - try freeLocal(f, inst, ret_val.local, 0); + try freeLocal(f, inst, ret_val.new_local, 0); } } else { try reap(f, inst, &.{un_op}); - if (f.object.dg.decl.ty.fnCallingConvention() != .Naked) { + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() != .Naked) // Not even 
allowed to return void in a naked function. try writer.writeAll("return;\n"); - } } return CValue.none; } @@ -3344,7 +3579,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderTypecast(writer, src_ty); try writer.writeAll("))"); if (src_val == .constant) { - try freeLocal(f, inst, array_src.local, 0); + try freeLocal(f, inst, array_src.new_local, 0); } } else if (ptr_info.host_size != 0) { const host_bits = ptr_info.host_size * 8; @@ -3770,8 +4005,12 @@ fn airCall( modifier: std.builtin.CallModifier, ) !CValue { // Not even allowed to call panic in a naked function. - if (f.object.dg.decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + const gpa = f.object.dg.gpa; + const module = f.object.dg.module; + const target = module.getTarget(); + const writer = f.object.writer(); switch (modifier) { .auto => {}, @@ -3786,8 +4025,28 @@ fn airCall( const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); - for (args, 0..) |arg, i| { - resolved_args[i] = try f.resolveInst(arg); + for (resolved_args, args) |*resolved_arg, arg| { + const arg_ty = f.air.typeOf(arg); + const arg_cty = try f.object.dg.typeToIndex(arg_ty, .parameter); + if (f.object.dg.indexToCType(arg_cty).tag() == .void) { + resolved_arg.* = .none; + continue; + } + resolved_arg.* = try f.resolveInst(arg); + if (arg_cty != try f.object.dg.typeToIndex(arg_ty, .complete)) { + var lowered_arg_buf: LowerFnRetTyBuffer = undefined; + const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target); + + const array_local = try f.allocLocal(inst, try lowered_arg_ty.copy(f.arena.allocator())); + try writer.writeAll("memcpy("); + try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); + try writer.writeAll(", "); + try f.writeCValue(writer, resolved_arg.*, .FunctionArgument); + try writer.writeAll(", sizeof("); + try f.renderTypecast(writer, lowered_arg_ty); + try writer.writeAll("));\n"); + resolved_arg.* = array_local; + } } const callee = try f.resolveInst(pl_op.operand); @@ -3804,9 +4063,7 @@ fn airCall( .Pointer => callee_ty.childType(), else => unreachable, }; - const writer = f.object.writer(); - const target = f.object.dg.module.getTarget(); const ret_ty = fn_ty.fnReturnType(); var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); @@ -3841,7 +4098,7 @@ fn airCall( else => break :known, }; }; - name = f.object.dg.module.declPtr(fn_decl).name; + name = module.declPtr(fn_decl).name; try f.object.dg.renderDeclName(writer, fn_decl, 0); break :callee; } @@ -3851,22 +4108,11 @@ fn airCall( try writer.writeByte('('); var args_written: usize = 0; - for (args, 0..) |arg, arg_i| { - const ty = f.air.typeOf(arg); - if (!ty.hasRuntimeBitsIgnoreComptime()) continue; - if (args_written != 0) { - try writer.writeAll(", "); - } - if ((is_extern or std.mem.eql(u8, std.mem.span(name), "main")) and - ty.isCPtr() and ty.childType().tag() == .u8) - { - // Corresponds with hack in renderType .Pointer case. 
- try writer.writeAll("(char"); - if (ty.isConstPtr()) try writer.writeAll(" const"); - if (ty.isVolatilePtr()) try writer.writeAll(" volatile"); - try writer.writeAll(" *)"); - } - try f.writeCValue(writer, resolved_args[arg_i], .FunctionArgument); + for (resolved_args) |resolved_arg| { + if (resolved_arg == .none) continue; + if (args_written != 0) try writer.writeAll(", "); + try f.writeCValue(writer, resolved_arg, .FunctionArgument); + if (resolved_arg == .new_local) try freeLocal(f, inst, resolved_arg.new_local, 0); args_written += 1; } try writer.writeAll(");\n"); @@ -3879,11 +4125,11 @@ fn airCall( try writer.writeAll("memcpy("); try f.writeCValue(writer, array_local, .FunctionArgument); try writer.writeAll(", "); - try f.writeCValueMember(writer, result_local, .{ .field = 0 }); + try f.writeCValueMember(writer, result_local, .{ .identifier = "array" }); try writer.writeAll(", sizeof("); try f.renderTypecast(writer, ret_ty); try writer.writeAll("));\n"); - try freeLocal(f, inst, result_local.local, 0); + try freeLocal(f, inst, result_local.new_local, 0); break :r array_local; }; @@ -4147,7 +4393,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { } if (operand == .constant) { - try freeLocal(f, inst, operand_lval.local, 0); + try freeLocal(f, inst, operand_lval.new_local, 0); } return local; @@ -4193,7 +4439,7 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { fn airUnreach(f: *Function) !CValue { // Not even allowed to call unreachable in a naked function. - if (f.object.dg.decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); return CValue.none; @@ -4667,7 +4913,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { try f.writeCValueDeref(writer, if (output == .none) - CValue{ .local_ref = local.local } + CValue{ .local_ref = local.new_local } else try f.resolveInst(output)); try writer.writeAll(" = "); @@ -4967,18 +5213,20 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc else => .none, }; - const FieldLoc = union(enum) { + const field_loc: union(enum) { begin: void, field: CValue, end: void, - }; - const field_loc = switch (struct_ty.tag()) { - .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => for (struct_ty.structFields().values()[index..], 0..) 
|field, offset| { - if (field.ty.hasRuntimeBitsIgnoreComptime()) break FieldLoc{ .field = .{ - .identifier = struct_ty.structFieldName(index + offset), - } }; - } else @as(FieldLoc, .end), + } = switch (struct_ty.tag()) { + .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { + .Auto, .Extern => for (index..struct_ty.structFieldCount()) |field_i| { + if (!struct_ty.structFieldIsComptime(field_i) and + struct_ty.structFieldType(field_i).hasRuntimeBitsIgnoreComptime()) + break .{ .field = if (struct_ty.isSimpleTuple()) + .{ .field = field_i } + else + .{ .identifier = struct_ty.structFieldName(field_i) } }; + } else .end, .Packed => if (field_ptr_info.data.host_size == 0) { const target = f.object.dg.module.getTarget(); @@ -5003,27 +5251,15 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc try f.writeCValue(writer, struct_ptr, .Other); try writer.print(")[{}];\n", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)}); return local; - } else @as(FieldLoc, .begin), + } else .begin, }, .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { try f.writeCValue(writer, struct_ptr, .Other); try writer.writeAll(";\n"); return local; - } else if (field_ty.hasRuntimeBitsIgnoreComptime()) FieldLoc{ .field = .{ + } else if (field_ty.hasRuntimeBitsIgnoreComptime()) .{ .field = .{ .identifier = struct_ty.unionFields().keys()[index], - } } else @as(FieldLoc, .end), - .tuple, .anon_struct => field_name: { - const tuple = struct_ty.tupleFields(); - if (tuple.values[index].tag() != .unreachable_value) return CValue.none; - - var id: usize = 0; - break :field_name for (tuple.values, 0..) |value, i| { - if (value.tag() != .unreachable_value) continue; - if (!tuple.types[i].hasRuntimeBitsIgnoreComptime()) continue; - if (i >= index) break FieldLoc{ .field = .{ .field = id } }; - id += 1; - } else @as(FieldLoc, .end); - }, + } } else .end, else => unreachable, }; @@ -5076,8 +5312,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { }; const field_name: CValue = switch (struct_ty.tag()) { - .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => .{ .identifier = struct_ty.structFieldName(extra.field_index) }, + .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { + .Auto, .Extern => if (struct_ty.isSimpleTuple()) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index) }, .Packed => { const struct_obj = struct_ty.castTag(.@"struct").?.data; const int_info = struct_ty.intInfo(target); @@ -5135,13 +5374,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try writer.writeAll("memcpy("); - try f.writeCValue(writer, .{ .local_ref = local.local }, .FunctionArgument); + try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); try writer.writeAll(", "); - try f.writeCValue(writer, .{ .local_ref = temp_local.local }, .FunctionArgument); + try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); try writer.writeAll(", sizeof("); try f.renderTypecast(writer, inst_ty); try writer.writeAll("));\n"); - try freeLocal(f, inst, temp_local.local, 0); + try freeLocal(f, inst, temp_local.new_local, 0); return local; }, }, @@ -5165,22 +5404,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("));\n"); if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.local, 
0); + try freeLocal(f, inst, operand_lval.new_local, 0); } return local; } else .{ .identifier = struct_ty.unionFields().keys()[extra.field_index], }, - .tuple, .anon_struct => blk: { - const tuple = struct_ty.tupleFields(); - if (tuple.values[extra.field_index].tag() != .unreachable_value) return CValue.none; - - var id: usize = 0; - for (tuple.values[0..extra.field_index]) |value| - id += @boolToInt(value.tag() == .unreachable_value); - break :blk .{ .field = id }; - }, else => unreachable, }; @@ -5765,7 +5995,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } if (f.liveness.isUnused(inst)) { - try freeLocal(f, inst, local.local, 0); + try freeLocal(f, inst, local.new_local, 0); return CValue.none; } @@ -5808,7 +6038,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(");\n"); if (f.liveness.isUnused(inst)) { - try freeLocal(f, inst, local.local, 0); + try freeLocal(f, inst, local.new_local, 0); return CValue.none; } @@ -5905,7 +6135,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs }); - try freeLocal(f, inst, index.local, 0); + try freeLocal(f, inst, index.new_local, 0); return CValue.none; } @@ -6222,7 +6452,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); - try freeLocal(f, inst, it.local, 0); + try freeLocal(f, inst, it.new_local, 0); return accum; } @@ -6235,8 +6465,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const gpa = f.object.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); defer gpa.free(resolved_elements); - for (elements, 0..) |element, i| { - resolved_elements[i] = try f.resolveInst(element); + for (resolved_elements, elements) |*resolved_element, element| { + resolved_element.* = try f.resolveInst(element); } { var bt = iterateBigTomb(f, inst); @@ -6275,46 +6505,47 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(")"); try writer.writeByte('{'); var empty = true; - for (elements, 0..) |element, index| { - if (inst_ty.structFieldValueComptime(index)) |_| continue; + for (elements, resolved_elements, 0..) |element, resolved_element, field_i| { + if (inst_ty.structFieldValueComptime(field_i)) |_| continue; if (!empty) try writer.writeAll(", "); - if (!inst_ty.isTupleOrAnonStruct()) { - try writer.print(".{ } = ", .{fmtIdent(inst_ty.structFieldName(index))}); - } + + const field_name: CValue = if (inst_ty.isSimpleTuple()) + .{ .field = field_i } + else + .{ .identifier = inst_ty.structFieldName(field_i) }; + try writer.writeByte('.'); + try f.object.dg.writeCValue(writer, field_name); + try writer.writeAll(" = "); const element_ty = f.air.typeOf(element); try f.writeCValue(writer, switch (element_ty.zigTypeTag()) { .Array => CValue{ .undef = element_ty }, - else => resolved_elements[index], + else => resolved_element, }, .Initializer); empty = false; } - if (empty) try writer.print("{}", .{try f.fmtIntLiteral(Type.u8, Value.zero)}); try writer.writeAll("};\n"); - var field_id: usize = 0; - for (elements, 0..) |element, index| { - if (inst_ty.structFieldValueComptime(index)) |_| continue; + for (elements, resolved_elements, 0..) 
|element, resolved_element, field_i| { + if (inst_ty.structFieldValueComptime(field_i)) |_| continue; const element_ty = f.air.typeOf(element); if (element_ty.zigTypeTag() != .Array) continue; - const field_name = if (inst_ty.isTupleOrAnonStruct()) - CValue{ .field = field_id } + const field_name: CValue = if (inst_ty.isSimpleTuple()) + .{ .field = field_i } else - CValue{ .identifier = inst_ty.structFieldName(index) }; + .{ .identifier = inst_ty.structFieldName(field_i) }; try writer.writeAll(";\n"); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, local, field_name); try writer.writeAll(", "); - try f.writeCValue(writer, resolved_elements[index], .FunctionArgument); + try f.writeCValue(writer, resolved_element, .FunctionArgument); try writer.writeAll(", sizeof("); try f.renderTypecast(writer, element_ty); try writer.writeAll("));\n"); - - field_id += 1; } }, .Packed => { @@ -6332,7 +6563,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); var empty = true; - for (elements, 0..) |_, index| { + for (0..elements.len) |index| { const field_ty = inst_ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; @@ -6381,13 +6612,6 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { empty = false; } - if (empty) { - try writer.writeByte('('); - try f.renderTypecast(writer, inst_ty); - try writer.writeByte(')'); - try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer); - } - try writer.writeAll(";\n"); }, }, @@ -7020,17 +7244,20 @@ fn isByRef(ty: Type) bool { } const LowerFnRetTyBuffer = struct { + names: [1][]const u8, types: [1]Type, values: [1]Value, - payload: Type.Payload.Tuple, + payload: Type.Payload.AnonStruct, }; fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type { if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn); if (lowersToArray(ret_ty, target)) { + buffer.names = [1][]const u8{"array"}; buffer.types = [1]Type{ret_ty}; buffer.values = [1]Value{Value.initTag(.unreachable_value)}; buffer.payload = .{ .data = .{ + .names = &buffer.names, .types = &buffer.types, .values = &buffer.values, } }; @@ -7086,7 +7313,7 @@ fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void { if (f.air.instructions.items(.tag)[ref_inst] == .constant) return; const c_value = (f.value_map.fetchRemove(ref) orelse return).value; const local_index = switch (c_value) { - .local => |l| l, + .local, .new_local => |l| l, else => return, }; try freeLocal(f, inst, local_index, ref_inst); @@ -7161,8 +7388,8 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void { } fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void { - for (f.locals.items[pre_locals_len..], 0..) |*local, local_offset| { - const local_index = pre_locals_len + @intCast(LocalIndex, local_offset); + for (f.locals.items[pre_locals_len..], pre_locals_len..) 
|*local, local_i| { + const local_index = @intCast(LocalIndex, local_i); if (f.allocs.contains(local_index)) continue; // allocs are not freeable // free more deeply nested locals from other branches at current depth diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index ad482024b7..d6424b1f27 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -110,10 +110,16 @@ pub const CType = extern union { pointer_const_volatile, array, vector, + fwd_anon_struct, + fwd_anon_union, fwd_struct, fwd_union, + unnamed_struct, + unnamed_union, + packed_unnamed_struct, + packed_unnamed_union, anon_struct, - packed_anon_struct, + anon_union, @"struct", @"union", packed_struct, @@ -183,14 +189,22 @@ pub const CType = extern union { .vector, => Payload.Sequence, + .fwd_anon_struct, + .fwd_anon_union, + => Payload.Fields, + .fwd_struct, .fwd_union, => Payload.FwdDecl, - .anon_struct, - .packed_anon_struct, - => Payload.Fields, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => Payload.Unnamed, + .anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, @@ -229,14 +243,55 @@ pub const CType = extern union { base: Payload, data: Data, - const Data = []const Field; - const Field = struct { + pub const Data = []const Field; + pub const Field = struct { name: [*:0]const u8, type: Index, - alignas: u32, + alignas: AlignAs, + }; + pub const AlignAs = struct { + @"align": std.math.Log2Int(u32), + abi: std.math.Log2Int(u32), + + pub fn init(alignment: u32, abi_alignment: u32) AlignAs { + assert(std.math.isPowerOfTwo(alignment)); + assert(std.math.isPowerOfTwo(abi_alignment)); + return .{ + .@"align" = std.math.log2_int(u32, alignment), + .abi = std.math.log2_int(u32, abi_alignment), + }; + } + pub fn abiAlign(ty: Type, target: Target) AlignAs { + const abi_align = ty.abiAlignment(target); + return init(abi_align, abi_align); + } + pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs { + return init( + struct_ty.structFieldAlign(field_i, target), + struct_ty.structFieldType(field_i).abiAlignment(target), + ); + } + pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs { + const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_payload_align = union_obj.abiAlignment(target, false); + return init(union_payload_align, union_payload_align); + } + + pub fn getAlign(self: AlignAs) u32 { + return @as(u32, 1) << self.@"align"; + } }; }; + pub const Unnamed = struct { + base: Payload, + data: struct { + fields: Fields.Data, + owner_decl: Module.Decl.Index, + id: u32, + }, + }; + pub const Aggregate = struct { base: Payload, data: struct { @@ -259,22 +314,23 @@ pub const CType = extern union { arena: std.heap.ArenaAllocator.State = .{}, set: Set = .{}, - const Set = struct { - const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext32, true); + pub const Set = struct { + pub const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext32, true); map: Map = .{}, - fn indexToCType(self: Set, index: Index) CType { + pub fn indexToCType(self: Set, index: Index) CType { if (index < Tag.no_payload_count) return initTag(@intToEnum(Tag, index)); return self.map.keys()[index - Tag.no_payload_count]; } - fn indexToHash(self: Set, index: Index) Map.Hash { - if (index < Tag.no_payload_count) return self.indexToCType(index).hash(self); + pub fn indexToHash(self: Set, index: Index) Map.Hash { + if (index < Tag.no_payload_count) + return (HashContext32{ .store = &self }).hash(self.indexToCType(index)); return 
self.map.entries.items(.hash)[index - Tag.no_payload_count]; } - fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index { + pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index { const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } }; var convert: Convert = undefined; @@ -298,21 +354,27 @@ pub const CType = extern union { return self.arena.child_allocator; } - fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { + pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { const t = cty.tag(); if (@enumToInt(t) < Tag.no_payload_count) return @intCast(Index, @enumToInt(t)); const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set }); if (!gop.found_existing) gop.key_ptr.* = cty; if (std.debug.runtime_safety) { - const key = self.set.map.entries.items(.key)[gop.index]; - assert(key.eql(cty)); + const key = &self.set.map.entries.items(.key)[gop.index]; + assert(key == gop.key_ptr); + assert(cty.eql(key.*)); assert(cty.hash(self.set) == key.hash(self.set)); } return @intCast(Index, Tag.no_payload_count + gop.index); } - fn typeToIndex(self: *Promoted, ty: Type, mod: *Module, kind: Kind) Allocator.Error!Index { + pub fn typeToIndex( + self: *Promoted, + ty: Type, + mod: *Module, + kind: Kind, + ) Allocator.Error!Index { const lookup = Convert.Lookup{ .mut = .{ .promoted = self, .mod = mod } }; var convert: Convert = undefined; @@ -337,9 +399,10 @@ pub const CType = extern union { .lookup = lookup.freeze(), .convert = &convert, }; - const key = self.set.map.entries.items(.key)[gop.index]; - assert(adapter.eql(ty, key)); - assert(adapter.hash(ty) == key.hash(self.set)); + const cty = &self.set.map.entries.items(.key)[gop.index]; + assert(cty == gop.key_ptr); + assert(adapter.eql(ty, cty.*)); + assert(adapter.hash(ty) == cty.hash(self.set)); } return @intCast(Index, Tag.no_payload_count + gop.index); } @@ -358,21 +421,25 @@ pub const CType = extern union { return self.set.indexToCType(index); } + pub fn indexToHash(self: Store, index: Index) Set.Map.Hash { + return self.set.indexToHash(index); + } + pub fn cTypeToIndex(self: *Store, gpa: Allocator, cty: CType) !Index { var promoted = self.promote(gpa); defer self.demote(promoted); return promoted.cTypeToIndex(cty); } - pub fn typeToCType(self: *Store, gpa: Allocator, ty: Type, mod: *Module) !CType { - const idx = try self.typeToIndex(gpa, ty, mod); + pub fn typeToCType(self: *Store, gpa: Allocator, ty: Type, mod: *Module, kind: Kind) !CType { + const idx = try self.typeToIndex(gpa, ty, mod, kind); return self.indexToCType(idx); } - pub fn typeToIndex(self: *Store, gpa: Allocator, ty: Type, mod: *Module) !Index { + pub fn typeToIndex(self: *Store, gpa: Allocator, ty: Type, mod: *Module, kind: Kind) !Index { var promoted = self.promote(gpa); defer self.demote(promoted); - return promoted.typeToIndex(ty, mod, .complete); + return promoted.typeToIndex(ty, mod, kind); } pub fn clearRetainingCapacity(self: *Store, gpa: Allocator) void { @@ -389,8 +456,16 @@ pub const CType = extern union { _ = promoted.arena.reset(.free_all); } - pub fn shrinkToFit(self: *Store, gpa: Allocator) void { - self.set.map.shrinkAndFree(gpa, self.set.map.count()); + pub fn shrinkRetainingCapacity(self: *Store, gpa: Allocator, new_len: usize) void { + self.set.map.shrinkRetainingCapacity(gpa, new_len); + } + + pub fn shrinkAndFree(self: *Store, gpa: Allocator, new_len: usize) void { + self.set.map.shrinkAndFree(gpa, new_len); + } + + pub fn count(self: Store) usize 
{ + return self.set.map.count(); } pub fn move(self: *Store) Store { @@ -407,7 +482,37 @@ pub const CType = extern union { } }; + pub fn isPacked(self: CType) bool { + return switch (self.tag()) { + else => false, + .packed_unnamed_struct, + .packed_unnamed_union, + .packed_struct, + .packed_union, + => true, + }; + } + + pub fn fields(self: CType) Payload.Fields.Data { + return if (self.cast(Payload.Aggregate)) |pl| + pl.data.fields + else if (self.cast(Payload.Unnamed)) |pl| + pl.data.fields + else if (self.cast(Payload.Fields)) |pl| + pl.data + else + unreachable; + } + pub fn eql(lhs: CType, rhs: CType) bool { + return lhs.eqlContext(rhs, struct { + pub fn eqlIndex(_: @This(), lhs_idx: Index, rhs_idx: Index) bool { + return lhs_idx == rhs_idx; + } + }{}); + } + + pub fn eqlContext(lhs: CType, rhs: CType, ctx: anytype) bool { // As a shortcut, if the small tags / addresses match, we're done. if (lhs.tag_if_small_enough == rhs.tag_if_small_enough) return true; @@ -458,35 +563,52 @@ pub const CType = extern union { .pointer_const, .pointer_volatile, .pointer_const_volatile, - => lhs.cast(Payload.Child).?.data == rhs.cast(Payload.Child).?.data, + => ctx.eqlIndex(lhs.cast(Payload.Child).?.data, rhs.cast(Payload.Child).?.data), .array, .vector, - => std.meta.eql(lhs.cast(Payload.Sequence).?.data, rhs.cast(Payload.Sequence).?.data), - - .fwd_struct, - .fwd_union, - => lhs.cast(Payload.FwdDecl).?.data == rhs.cast(Payload.FwdDecl).?.data, + => { + const lhs_data = lhs.cast(Payload.Sequence).?.data; + const rhs_data = rhs.cast(Payload.Sequence).?.data; + return lhs_data.len == rhs_data.len and + ctx.eqlIndex(lhs_data.elem_type, rhs_data.elem_type); + }, - .anon_struct, - .packed_anon_struct, + .fwd_anon_struct, + .fwd_anon_union, => { const lhs_data = lhs.cast(Payload.Fields).?.data; const rhs_data = rhs.cast(Payload.Fields).?.data; if (lhs_data.len != rhs_data.len) return false; for (lhs_data, rhs_data) |lhs_field, rhs_field| { - if (lhs_field.type != rhs_field.type) return false; - if (lhs_field.alignas != rhs_field.alignas) return false; + if (!ctx.eqlIndex(lhs_field.type, rhs_field.type)) return false; + if (lhs_field.alignas.@"align" != rhs_field.alignas.@"align") return false; if (cstr.cmp(lhs_field.name, rhs_field.name) != 0) return false; } return true; }, + .fwd_struct, + .fwd_union, + => lhs.cast(Payload.FwdDecl).?.data == rhs.cast(Payload.FwdDecl).?.data, + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => { + const lhs_data = lhs.cast(Payload.Unnamed).?.data; + const rhs_data = rhs.cast(Payload.Unnamed).?.data; + return lhs_data.owner_decl == rhs_data.owner_decl and lhs_data.id == rhs_data.id; + }, + + .anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, .packed_union, - => std.meta.eql( + => ctx.eqlIndex( lhs.cast(Payload.Aggregate).?.data.fwd_decl, rhs.cast(Payload.Aggregate).?.data.fwd_decl, ), @@ -496,10 +618,10 @@ pub const CType = extern union { => { const lhs_data = lhs.cast(Payload.Function).?.data; const rhs_data = rhs.cast(Payload.Function).?.data; - if (lhs_data.return_type != rhs_data.return_type) return false; if (lhs_data.param_types.len != rhs_data.param_types.len) return false; - for (lhs_data.param_types, rhs_data.param_types) |lhs_param_cty, rhs_param_cty| { - if (lhs_param_cty != rhs_param_cty) return false; + if (!ctx.eqlIndex(lhs_data.return_type, rhs_data.return_type)) return false; + for (lhs_data.param_types, rhs_data.param_types) |lhs_param_idx, rhs_param_idx| { + if (!ctx.eqlIndex(lhs_param_idx, 
rhs_param_idx)) return false; } return true; }, @@ -568,18 +690,30 @@ pub const CType = extern union { store.indexToCType(data.elem_type).updateHasher(hasher, store); }, + .fwd_anon_struct, + .fwd_anon_union, + => for (self.cast(Payload.Fields).?.data) |field| { + store.indexToCType(field.type).updateHasher(hasher, store); + hasher.update(mem.span(field.name)); + autoHash(hasher, field.alignas.@"align"); + }, + .fwd_struct, .fwd_union, => autoHash(hasher, self.cast(Payload.FwdDecl).?.data), - .anon_struct, - .packed_anon_struct, - => for (self.cast(Payload.Fields).?.data) |field| { - store.indexToCType(field.type).updateHasher(hasher, store); - hasher.update(mem.span(field.name)); - autoHash(hasher, field.alignas); + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => { + const data = self.cast(Payload.Unnamed).?.data; + autoHash(hasher, data.owner_decl); + autoHash(hasher, data.id); }, + .anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, @@ -599,7 +733,7 @@ pub const CType = extern union { } } - pub const Kind = enum { forward, complete, global, parameter }; + pub const Kind = enum { forward, forward_parameter, complete, global, parameter, payload }; const Convert = struct { storage: union { @@ -609,9 +743,11 @@ pub const CType = extern union { fwd: Payload.FwdDecl, anon: struct { fields: [2]Payload.Fields.Field, - pl: Payload.Fields, + pl: union { + forward: Payload.Fields, + complete: Payload.Aggregate, + }, }, - agg: Payload.Aggregate, }, value: union(enum) { tag: Tag, @@ -716,6 +852,66 @@ pub const CType = extern union { } }; + fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field { + const Field = Payload.Fields.Field; + const slice = self.storage.anon.fields[0..fields_len]; + std.sort.sort(Field, slice, {}, struct { + fn before(_: void, lhs: Field, rhs: Field) bool { + return lhs.alignas.@"align" > rhs.alignas.@"align"; + } + }.before); + return slice; + } + + fn initAnon(self: *@This(), kind: Kind, fwd_idx: Index, fields_len: usize) void { + switch (kind) { + .forward, .forward_parameter => { + self.storage.anon.pl = .{ .forward = .{ + .base = .{ .tag = .fwd_anon_struct }, + .data = self.sortFields(fields_len), + } }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl.forward) }; + }, + .complete, .parameter, .global => { + self.storage.anon.pl = .{ .complete = .{ + .base = .{ .tag = .anon_struct }, + .data = .{ + .fields = self.sortFields(fields_len), + .fwd_decl = fwd_idx, + }, + } }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; + }, + .payload => unreachable, + } + } + + fn initArrayParameter(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { + if (switch (kind) { + .forward_parameter => @as(Index, undefined), + .parameter => try lookup.typeToIndex(ty, .forward_parameter), + .forward, .complete, .global, .payload => unreachable, + }) |fwd_idx| { + if (try lookup.typeToIndex(ty, switch (kind) { + .forward_parameter => .forward, + .parameter => .complete, + .forward, .complete, .global, .payload => unreachable, + })) |array_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ + .name = "array", + .type = array_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(ty, lookup.getTarget()), + }; + self.initAnon(kind, fwd_idx, 1); + } else self.init(switch (kind) { + .forward_parameter => .fwd_anon_struct, + .parameter => .anon_struct, + .forward, .complete, .global, .payload => unreachable, + }); + } else self.init(.anon_struct); + } + pub 
fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { const target = lookup.getTarget(); @@ -739,17 +935,23 @@ pub const CType = extern union { switch (t) { .void => unreachable, else => self.init(t), - .array => { - const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); - self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ - .len = @divExact(abi_size, abi_align), - .elem_type = tagFromIntInfo( - .unsigned, - @intCast(u16, abi_align * 8), - ).toIndex(), - } } }; - self.value = .{ .cty = initPayload(&self.storage.seq) }; + .array => switch (kind) { + .forward, .complete, .global => { + const abi_size = ty.abiSize(target); + const abi_align = ty.abiAlignment(target); + self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ + .len = @divExact(abi_size, abi_align), + .elem_type = tagFromIntInfo( + .unsigned, + @intCast(u16, abi_align * 8), + ).toIndex(), + } } }; + self.value = .{ .cty = initPayload(&self.storage.seq) }; + }, + .forward_parameter, + .parameter, + => try self.initArrayParameter(ty, kind, lookup), + .payload => unreachable, }, } }, @@ -782,165 +984,297 @@ pub const CType = extern union { else => unreachable, }), - .Pointer => switch (ty.ptrSize()) { - .Slice => { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); - if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { - self.storage = .{ .anon = .{ .fields = .{ - .{ - .name = "ptr", - .type = ptr_idx, - .alignas = ptr_ty.abiAlignment(target), + .Pointer => { + const info = ty.ptrInfo().data; + switch (info.size) { + .Slice => { + if (switch (kind) { + .forward, .forward_parameter => @as(Index, undefined), + .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + }) |fwd_idx| { + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const ptr_ty = ty.slicePtrFieldType(&buf); + if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ + .name = "ptr", + .type = ptr_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(ptr_ty, target), + }; + self.storage.anon.fields[1] = .{ + .name = "len", + .type = Tag.uintptr_t.toIndex(), + .alignas = Payload.Fields.AlignAs.abiAlign(Type.usize, target), + }; + self.initAnon(kind, fwd_idx, 2); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else self.init(.anon_struct); + }, + + .One, .Many, .C => { + const t: Tag = switch (info.@"volatile") { + false => switch (info.mutable) { + true => .pointer, + false => .pointer_const, }, - .{ - .name = "len", - .type = Tag.size_t.toIndex(), - .alignas = Type.usize.abiAlignment(target), + true => switch (info.mutable) { + true => .pointer_volatile, + false => .pointer_const_volatile, }, - }, .pl = undefined } }; - self.storage.anon.pl = .{ - .base = .{ .tag = .anon_struct }, - .data = self.storage.anon.fields[0..2], }; - self.value = .{ .cty = initPayload(&self.storage.anon.pl) }; - } else self.init(.anon_struct); - }, - .One, .Many, .C => { - const t: Tag = switch (ty.isVolatilePtr()) { - false => switch (ty.isConstPtr()) { - false => .pointer, - true => .pointer_const, - }, - true => switch (ty.isConstPtr()) { - false => .pointer_volatile, - true => .pointer_const_volatile, - }, - }; - if (try lookup.typeToIndex(ty.childType(), .forward)) |child_idx| { - self.storage = .{ .child = .{ .base = .{ .tag = t 
}, .data = child_idx } }; - self.value = .{ .cty = initPayload(&self.storage.child) }; - } else self.init(t); - }, + var host_int_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = info.host_size * 8, + }; + const pointee_ty = if (info.host_size > 0) + Type.initPayload(&host_int_pl.base) + else + info.pointee_type; + + if (if (info.size == .C and pointee_ty.tag() == .u8) + Tag.char.toIndex() + else + try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| + { + self.storage = .{ .child = .{ + .base = .{ .tag = t }, + .data = child_idx, + } }; + self.value = .{ .cty = initPayload(&self.storage.child) }; + } else self.init(t); + }, + } }, - .Struct, .Union => |zig_tag| if (ty.isTupleOrAnonStruct()) { + .Struct, .Union => |zig_tag| if (ty.containerLayout() == .Packed) { + if (ty.castTag(.@"struct")) |struct_obj| { + try self.initType(struct_obj.data.backing_int_ty, kind, lookup); + } else { + var buf: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.bitSize(target)), + }; + try self.initType(Type.initPayload(&buf.base), kind, lookup); + } + } else if (ty.isTupleOrAnonStruct()) { if (lookup.isMutable()) { for (0..ty.structFieldCount()) |field_i| { const field_ty = ty.structFieldType(field_i); if (ty.structFieldIsComptime(field_i) or !field_ty.hasRuntimeBitsIgnoreComptime()) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { - .forward, .complete, .parameter => .complete, + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, .global => .global, + .payload => unreachable, }); } + switch (kind) { + .forward, .forward_parameter => {}, + .complete, .parameter, .global => _ = try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + } } - self.init(.anon_struct); + self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); } else { - const is_struct = zig_tag == .Struct or ty.unionTagTypeSafety() != null; + const tag_ty = ty.unionTagTypeSafety(); + const is_tagged_union_wrapper = kind != .payload and tag_ty != null; + const is_struct = zig_tag == .Struct or is_tagged_union_wrapper; switch (kind) { - .forward => { + .forward, .forward_parameter => { self.storage = .{ .fwd = .{ .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union }, .data = ty.getOwnerDecl(), } }; self.value = .{ .cty = initPayload(&self.storage.fwd) }; }, - else => { - if (lookup.isMutable()) { - for (0..switch (zig_tag) { - .Struct => ty.structFieldCount(), - .Union => ty.cast(Type.Payload.Union).?.data.fields.count(), - else => unreachable, - }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + .complete, .parameter, .global, .payload => if (is_tagged_union_wrapper) { + const fwd_idx = try lookup.typeToIndex(ty, .forward); + const payload_idx = try lookup.typeToIndex(ty, .payload); + const tag_idx = try lookup.typeToIndex(tag_ty.?, kind); + if (fwd_idx != null and payload_idx != null and tag_idx != null) { + self.storage = .{ .anon = undefined }; + var field_count: usize = 0; + if (payload_idx != Tag.void.toIndex()) { + self.storage.anon.fields[field_count] = .{ + .name = "payload", + .type = payload_idx.?, + .alignas = Payload.Fields.AlignAs.unionPayloadAlign(ty, target), + }; + field_count += 1; + } + if (tag_idx != Tag.void.toIndex()) { + self.storage.anon.fields[field_count] = .{ + .name = "tag", + .type = tag_idx.?, + .alignas = 
Payload.Fields.AlignAs.abiAlign(tag_ty.?, target), + }; + field_count += 1; + } + self.storage.anon.pl = .{ .complete = .{ + .base = .{ .tag = .@"struct" }, + .data = .{ + .fields = self.sortFields(field_count), + .fwd_decl = fwd_idx.?, + }, + } }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; + } else self.init(.@"struct"); + } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) { + self.init(.void); + } else { + var is_packed = false; + for (0..switch (zig_tag) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + const field_align = Payload.Fields.AlignAs.fieldAlign( + ty, + field_i, + target, + ); + if (field_align.@"align" < field_align.abi) { + is_packed = true; + if (!lookup.isMutable()) break; + } + + if (lookup.isMutable()) { _ = try lookup.typeToIndex(field_ty, switch (kind) { - .forward => unreachable, - .complete, .parameter => .complete, + .forward, .forward_parameter => unreachable, + .complete, .parameter, .payload => .complete, .global => .global, }); } - _ = try lookup.typeToIndex(ty, .forward); } - self.init(if (is_struct) .@"struct" else .@"union"); + switch (kind) { + .forward, .forward_parameter => unreachable, + .complete, .parameter, .global => { + _ = try lookup.typeToIndex(ty, .forward); + self.init(if (is_struct) + if (is_packed) .packed_struct else .@"struct" + else if (is_packed) .packed_union else .@"union"); + }, + .payload => self.init(if (is_packed) + .packed_unnamed_union + else + .unnamed_union), + } }, } }, .Array, .Vector => |zig_tag| { - const t: Tag = switch (zig_tag) { - .Array => .array, - .Vector => .vector, - else => unreachable, - }; - if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { - self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ - .len = ty.arrayLenIncludingSentinel(), - .elem_type = child_idx, - } } }; - self.value = .{ .cty = initPayload(&self.storage.seq) }; - } else self.init(t); + switch (kind) { + .forward, .complete, .global => { + const t: Tag = switch (zig_tag) { + .Array => .array, + .Vector => .vector, + else => unreachable, + }; + if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { + self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ + .len = ty.arrayLenIncludingSentinel(), + .elem_type = child_idx, + } } }; + self.value = .{ .cty = initPayload(&self.storage.seq) }; + } else self.init(t); + }, + .forward_parameter, .parameter => try self.initArrayParameter(ty, kind, lookup), + .payload => unreachable, + } }, .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - if (ty.optionalReprIsPayload()) - try self.initType(payload_ty, kind, lookup) - else if (try lookup.typeToIndex(payload_ty, kind)) |payload_idx| { - self.storage = .{ .anon = .{ .fields = .{ - .{ + if (ty.optionalReprIsPayload()) { + try self.initType(payload_ty, kind, lookup); + } else if (switch (kind) { + .forward, .forward_parameter => @as(Index, undefined), + .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + }) |fwd_idx| { + if (try lookup.typeToIndex(payload_ty, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + })) |payload_idx| { + self.storage = .{ .anon = 
undefined }; + self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = payload_ty.abiAlignment(target), - }, - .{ + .alignas = Payload.Fields.AlignAs.abiAlign(payload_ty, target), + }; + self.storage.anon.fields[1] = .{ .name = "is_null", .type = Tag.bool.toIndex(), - .alignas = Type.bool.abiAlignment(target), - }, - }, .pl = undefined } }; - self.storage.anon.pl = .{ - .base = .{ .tag = .anon_struct }, - .data = self.storage.anon.fields[0..2], - }; - self.value = .{ .cty = initPayload(&self.storage.anon.pl) }; + .alignas = Payload.Fields.AlignAs.abiAlign(Type.bool, target), + }; + self.initAnon(kind, fwd_idx, 2); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); } else self.init(.anon_struct); } else self.init(.bool); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - if (try lookup.typeToIndex(payload_ty, switch (kind) { - .forward, .complete, .parameter => .complete, - .global => .global, - })) |payload_idx| { - const error_ty = ty.errorUnionSet(); - if (payload_idx == Tag.void.toIndex()) - try self.initType(error_ty, kind, lookup) - else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { - self.storage = .{ .anon = .{ .fields = .{ - .{ + if (switch (kind) { + .forward, .forward_parameter => @as(Index, undefined), + .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + }) |fwd_idx| { + const payload_ty = ty.errorUnionPayload(); + if (try lookup.typeToIndex(payload_ty, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + })) |payload_idx| { + const error_ty = ty.errorUnionSet(); + if (payload_idx == Tag.void.toIndex()) { + try self.initType(error_ty, kind, lookup); + } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = payload_ty.abiAlignment(target), - }, - .{ + .alignas = Payload.Fields.AlignAs.abiAlign(payload_ty, target), + }; + self.storage.anon.fields[1] = .{ .name = "error", .type = error_idx, - .alignas = error_ty.abiAlignment(target), - }, - }, .pl = undefined } }; - self.storage.anon.pl = .{ - .base = .{ .tag = .anon_struct }, - .data = self.storage.anon.fields[0..2], - }; - self.value = .{ .cty = initPayload(&self.storage.anon.pl) }; - } else self.init(.anon_struct); + .alignas = Payload.Fields.AlignAs.abiAlign(error_ty, target), + }; + self.initAnon(kind, fwd_idx, 2); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); } else self.init(.anon_struct); }, @@ -959,16 +1293,15 @@ pub const CType = extern union { .Fn => { const info = ty.fnInfo(); if (lookup.isMutable()) { - _ = try lookup.typeToIndex(info.return_type, switch (kind) { - .forward => .forward, - .complete, .parameter, .global => .complete, - }); + const param_kind: Kind = switch (kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, + }; + _ = try lookup.typeToIndex(info.return_type, param_kind); for 
(info.param_types) |param_type| { if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - _ = try lookup.typeToIndex(param_type, switch (kind) { - .forward => .forward, - .complete, .parameter, .global => unreachable, - }); + _ = try lookup.typeToIndex(param_type, param_kind); } } self.init(if (info.is_var_args) .varargs_function else .function); @@ -977,16 +1310,33 @@ pub const CType = extern union { } }; - fn copyFields(arena: Allocator, fields: Payload.Fields.Data) !Payload.Fields.Data { - const new_fields = try arena.dupe(Payload.Fields.Field, fields); - for (new_fields) |*new_field| { - new_field.name = try arena.dupeZ(u8, mem.span(new_field.name)); - new_field.type = new_field.type; + pub fn copy(self: CType, arena: Allocator) !CType { + return self.copyContext(struct { + arena: Allocator, + pub fn copyIndex(_: @This(), idx: Index) Index { + return idx; + } + }{ .arena = arena }); + } + + fn copyFields(ctx: anytype, old_fields: Payload.Fields.Data) !Payload.Fields.Data { + const new_fields = try ctx.arena.alloc(Payload.Fields.Field, old_fields.len); + for (new_fields, old_fields) |*new_field, old_field| { + new_field.name = try ctx.arena.dupeZ(u8, mem.span(old_field.name)); + new_field.type = ctx.copyIndex(old_field.type); + new_field.alignas = old_field.alignas; } return new_fields; } - pub fn copy(self: CType, arena: Allocator) !CType { + fn copyParams(ctx: anytype, old_param_types: []const Index) ![]const Index { + const new_param_types = try ctx.arena.alloc(Index, old_param_types.len); + for (new_param_types, old_param_types) |*new_param_type, old_param_type| + new_param_type.* = ctx.copyIndex(old_param_type); + return new_param_types; + } + + pub fn copyContext(self: CType, ctx: anytype) !CType { switch (self.tag()) { .void, .char, @@ -1032,8 +1382,8 @@ pub const CType = extern union { .pointer_const_volatile, => { const pl = self.cast(Payload.Child).?; - const new_pl = try arena.create(Payload.Child); - new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = pl.data }; + const new_pl = try ctx.arena.create(Payload.Child); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = ctx.copyIndex(pl.data) }; return initPayload(new_pl); }, @@ -1041,48 +1391,62 @@ pub const CType = extern union { .vector, => { const pl = self.cast(Payload.Sequence).?; - const new_pl = try arena.create(Payload.Sequence); + const new_pl = try ctx.arena.create(Payload.Sequence); new_pl.* = .{ .base = .{ .tag = pl.base.tag }, - .data = .{ .len = pl.data.len, .elem_type = pl.data.elem_type }, + .data = .{ .len = pl.data.len, .elem_type = ctx.copyIndex(pl.data.elem_type) }, }; return initPayload(new_pl); }, - .fwd_struct, - .fwd_union, + .fwd_anon_struct, + .fwd_anon_union, => { - const pl = self.cast(Payload.FwdDecl).?; - const new_pl = try arena.create(Payload.FwdDecl); + const pl = self.cast(Payload.Fields).?; + const new_pl = try ctx.arena.create(Payload.Fields); new_pl.* = .{ .base = .{ .tag = pl.base.tag }, - .data = pl.data, + .data = try copyFields(ctx, pl.data), }; return initPayload(new_pl); }, - .anon_struct, - .packed_anon_struct, + .fwd_struct, + .fwd_union, => { - const pl = self.cast(Payload.Fields).?; - const new_pl = try arena.create(Payload.Fields); - new_pl.* = .{ - .base = .{ .tag = pl.base.tag }, - .data = try copyFields(arena, pl.data), - }; + const pl = self.cast(Payload.FwdDecl).?; + const new_pl = try ctx.arena.create(Payload.FwdDecl); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = pl.data }; + return initPayload(new_pl); + }, + + .unnamed_struct, + .unnamed_union, + 
.packed_unnamed_struct, + .packed_unnamed_union, + => { + const pl = self.cast(Payload.Unnamed).?; + const new_pl = try ctx.arena.create(Payload.Unnamed); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ + .fields = try copyFields(ctx, pl.data.fields), + .owner_decl = pl.data.owner_decl, + .id = pl.data.id, + } }; return initPayload(new_pl); }, + .anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, .packed_union, => { const pl = self.cast(Payload.Aggregate).?; - const new_pl = try arena.create(Payload.Aggregate); + const new_pl = try ctx.arena.create(Payload.Aggregate); new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ - .fields = try copyFields(arena, pl.data.fields), - .fwd_decl = pl.data.fwd_decl, + .fields = try copyFields(ctx, pl.data.fields), + .fwd_decl = ctx.copyIndex(pl.data.fwd_decl), } }; return initPayload(new_pl); }, @@ -1091,10 +1455,10 @@ pub const CType = extern union { .varargs_function, => { const pl = self.cast(Payload.Function).?; - const new_pl = try arena.create(Payload.Function); + const new_pl = try ctx.arena.create(Payload.Function); new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ - .return_type = pl.data.return_type, - .param_types = try arena.dupe(Index, pl.data.param_types), + .return_type = ctx.copyIndex(pl.data.return_type), + .param_types = try copyParams(ctx, pl.data.param_types), } }; return initPayload(new_pl); }, @@ -1118,8 +1482,14 @@ pub const CType = extern union { switch (convert.value) { .cty => |c| return c.copy(arena), .tag => |t| switch (t) { + .fwd_anon_struct, + .fwd_anon_union, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, .anon_struct, - .packed_anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, @@ -1149,31 +1519,44 @@ pub const CType = extern union { else arena.dupeZ(u8, ty.structFieldName(field_i)), .type = store.set.typeToIndex(field_ty, target, switch (kind) { - .forward, .complete, .parameter => .complete, + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, .global => .global, + .payload => unreachable, }).?, - .alignas = ty.structFieldAlign(field_i, target), + .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), }; c_field_i += 1; } - if (ty.isTupleOrAnonStruct()) { - const anon_pl = try arena.create(Payload.Fields); - anon_pl.* = .{ .base = .{ .tag = .anon_struct }, .data = fields_pl }; - return initPayload(anon_pl); - } + switch (t) { + .fwd_anon_struct => { + const anon_pl = try arena.create(Payload.Fields); + anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl }; + return initPayload(anon_pl); + }, - const struct_pl = try arena.create(Payload.Aggregate); - struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, - } }; - return initPayload(struct_pl); + .anon_struct, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const struct_pl = try arena.create(Payload.Aggregate); + struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(struct_pl); + }, + + else => unreachable, + } }, .Union => { - const fields = ty.unionFields(); - const fields_len = fields.count(); + const union_fields = ty.unionFields(); + const fields_len = union_fields.count(); var c_fields_len: usize = 0; for (0..fields_len) |field_i| { @@ -1185,7 +1568,7 @@ pub const CType = extern union { const fields_pl = try 
arena.alloc(Payload.Fields.Field, c_fields_len); var field_i: usize = 0; var c_field_i: usize = 0; - var field_it = fields.iterator(); + var field_it = union_fields.iterator(); while (field_it.next()) |field| { defer field_i += 1; if (!field.value_ptr.ty.hasRuntimeBitsIgnoreComptime()) continue; @@ -1193,21 +1576,35 @@ pub const CType = extern union { fields_pl[c_field_i] = .{ .name = try arena.dupeZ(u8, field.key_ptr.*), .type = store.set.typeToIndex(field.value_ptr.ty, target, switch (kind) { - .forward => unreachable, - .complete, .parameter => .complete, + .forward, .forward_parameter => unreachable, + .complete, .parameter, .payload => .complete, .global => .global, }).?, - .alignas = ty.structFieldAlign(field_i, target), + .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), }; c_field_i += 1; } - const union_pl = try arena.create(Payload.Aggregate); - union_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, - } }; - return initPayload(union_pl); + switch (kind) { + .forward, .forward_parameter => unreachable, + .complete, .parameter, .global => { + const union_pl = try arena.create(Payload.Aggregate); + union_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(union_pl); + }, + .payload => if (ty.unionTagTypeSafety()) |_| { + const union_pl = try arena.create(Payload.Unnamed); + union_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .owner_decl = ty.getOwnerDecl(), + .id = 0, + } }; + return initPayload(union_pl); + } else unreachable, + } }, else => unreachable, @@ -1217,9 +1614,10 @@ pub const CType = extern union { .varargs_function, => { const info = ty.fnInfo(); - const recurse_kind: Kind = switch (kind) { - .forward => .forward, - .complete, .parameter, .global => unreachable, + const param_kind: Kind = switch (kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, }; var c_params_len: usize = 0; @@ -1232,13 +1630,13 @@ pub const CType = extern union { var c_param_i: usize = 0; for (info.param_types) |param_type| { if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, target, recurse_kind).?; + params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, target, recurse_kind).?, + .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -1294,8 +1692,8 @@ pub const CType = extern union { const target = self.lookup.getTarget(); switch (t) { - .anon_struct, - .packed_anon_struct, + .fwd_anon_struct, + .fwd_anon_union, => { if (!ty.isTupleOrAnonStruct()) return false; @@ -1313,26 +1711,38 @@ pub const CType = extern union { const c_field = &c_fields[c_field_i]; c_field_i += 1; - if (!self.eqlRecurse( - ty.structFieldType(field_i), - c_field.type, - switch (self.kind) { - .forward, .complete, .parameter => .complete, - .global => .global, - }, - ) or !mem.eql( + if (!self.eqlRecurse(field_ty, c_field.type, switch (self.kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + }) 
or !mem.eql( u8, if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else ty.structFieldName(field_i), mem.span(c_field.name), - ) or ty.structFieldAlign(field_i, target) != c_field.alignas) - return false; + ) or Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align" != + c_field.alignas.@"align") return false; } return true; }, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => switch (self.kind) { + .forward, .forward_parameter, .complete, .parameter, .global => unreachable, + .payload => if (ty.unionTagTypeSafety()) |_| { + const data = cty.cast(Payload.Unnamed).?.data; + return ty.getOwnerDecl() == data.owner_decl and data.id == 0; + } else unreachable, + }, + + .anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, @@ -1350,19 +1760,27 @@ pub const CType = extern union { const info = ty.fnInfo(); const data = cty.cast(Payload.Function).?.data; - const recurse_kind: Kind = switch (self.kind) { - .forward => .forward, - .complete, .parameter, .global => unreachable, + const param_kind: Kind = switch (self.kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, }; - if (info.param_types.len != data.param_types.len or - !self.eqlRecurse(info.return_type, data.return_type, recurse_kind)) + if (!self.eqlRecurse(info.return_type, data.return_type, param_kind)) return false; - for (info.param_types, data.param_types) |param_ty, param_cty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; - if (!self.eqlRecurse(param_ty, param_cty, recurse_kind)) return false; + + var c_param_i: usize = 0; + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + + if (c_param_i >= data.param_types.len) return false; + const param_cty = data.param_types[c_param_i]; + c_param_i += 1; + + if (!self.eqlRecurse(param_type, param_cty, param_kind)) + return false; } - return true; + return c_param_i == data.param_types.len; }, else => unreachable, @@ -1395,13 +1813,17 @@ pub const CType = extern union { const target = self.lookup.getTarget(); switch (t) { - .anon_struct, - .packed_anon_struct, + .fwd_anon_struct, + .fwd_anon_union, => { var name_buf: [ std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; - for (0..ty.structFieldCount()) |field_i| { + for (0..switch (ty.zigTypeTag()) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }) |field_i| { const field_ty = ty.structFieldType(field_i); if (ty.structFieldIsComptime(field_i) or !field_ty.hasRuntimeBitsIgnoreComptime()) continue; @@ -1410,18 +1832,37 @@ pub const CType = extern union { hasher, ty.structFieldType(field_i), switch (self.kind) { - .forward, .complete, .parameter => .complete, + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, .global => .global, + .payload => unreachable, }, ); hasher.update(if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else ty.structFieldName(field_i)); - autoHash(hasher, ty.structFieldAlign(field_i, target)); + autoHash( + hasher, + Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align", + ); } }, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => switch (self.kind) { + .forward, .forward_parameter, .complete, .parameter, .global => unreachable, + .payload => if (ty.unionTagTypeSafety()) |_| { + autoHash(hasher, 
ty.getOwnerDecl()); + autoHash(hasher, @as(u32, 0)); + } else unreachable, + }, + + .anon_struct, + .anon_union, .@"struct", .@"union", .packed_struct, @@ -1432,15 +1873,16 @@ pub const CType = extern union { .varargs_function, => { const info = ty.fnInfo(); - const recurse_kind: Kind = switch (self.kind) { - .forward => .forward, - .complete, .parameter, .global => unreachable, + const param_kind: Kind = switch (self.kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, }; - self.updateHasherRecurse(hasher, info.return_type, recurse_kind); + self.updateHasherRecurse(hasher, info.return_type, param_kind); for (info.param_types) |param_type| { if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - self.updateHasherRecurse(hasher, param_type, recurse_kind); + self.updateHasherRecurse(hasher, param_type, param_kind); } }, diff --git a/src/link/C.zig b/src/link/C.zig index 8eb6fe16af..262e4e4923 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -117,7 +117,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .gpa = gpa, .module = module, .error_msg = null, - .decl_index = decl_index, + .decl_index = decl_index.toOptional(), .decl = module.declPtr(decl_index), .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, @@ -146,7 +146,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes code.* = function.object.code.moveToUnmanaged(); // Free excess allocated memory for this Decl. - ctypes.shrinkToFit(gpa); + ctypes.shrinkAndFree(gpa, ctypes.count()); lazy_fns.shrinkAndFree(gpa, lazy_fns.count()); fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); code.shrinkAndFree(gpa, code.items.len); @@ -176,7 +176,7 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi .gpa = gpa, .module = module, .error_msg = null, - .decl_index = decl_index, + .decl_index = decl_index.toOptional(), .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, @@ -204,7 +204,7 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi code.* = object.code.moveToUnmanaged(); // Free excess allocated memory for this Decl. - ctypes.shrinkToFit(gpa); + ctypes.shrinkAndFree(gpa, ctypes.count()); fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); code.shrinkAndFree(gpa, code.items.len); } @@ -247,8 +247,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) const abi_define = abiDefine(comp); - // Covers defines, zig.h, ctypes, asm. - try f.all_buffers.ensureUnusedCapacity(gpa, 4); + // Covers defines, zig.h, ctypes, asm, lazy fwd, lazy code. + try f.all_buffers.ensureUnusedCapacity(gpa, 6); if (abi_define) |buf| f.appendBufAssumeCapacity(buf); f.appendBufAssumeCapacity(zig_h); @@ -258,15 +258,15 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) { var asm_buf = f.asm_buf.toManaged(gpa); - defer asm_buf.deinit(); - - try codegen.genGlobalAsm(module, &asm_buf); - - f.asm_buf = asm_buf.moveToUnmanaged(); - f.appendBufAssumeCapacity(f.asm_buf.items); + defer f.asm_buf = asm_buf.moveToUnmanaged(); + try codegen.genGlobalAsm(module, asm_buf.writer()); + f.appendBufAssumeCapacity(asm_buf.items); } - try self.flushErrDecls(&f); + const lazy_indices = f.all_buffers.items.len; + f.all_buffers.items.len += 2; + + try self.flushErrDecls(&f.lazy_db); // `CType`s, forward decls, and non-functions first. 
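// A minimal sketch (helper name assumed, not part of this patch) of the
// index translation that `flushCTypes` performs below when folding each
// decl's CType store into the global store: indices below
// `no_payload_count` encode payload-free tags directly and mean the same
// thing in every store, so only payload-bearing indices are remapped
// through the per-decl `ctypes_map`.
fn mapToGlobal(ctypes_map: []const codegen.CType.Index, idx: codegen.CType.Index) codegen.CType.Index {
    if (idx < codegen.CType.Tag.no_payload_count) return idx; // payload-free tags are shared
    return ctypes_map[idx - codegen.CType.Tag.no_payload_count]; // remap payload-bearing index
}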
// Unlike other backends, the .c code we are emitting is order-dependent. Therefore @@ -295,6 +295,30 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) } } + { + // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes. + assert(f.ctypes.count() == 0); + try self.flushCTypes(&f, .none, f.lazy_db.ctypes); + + var it = self.decl_table.iterator(); + while (it.next()) |entry| + try self.flushCTypes(&f, entry.key_ptr.toOptional(), entry.value_ptr.ctypes); + } + + { + f.all_buffers.items[lazy_indices + 0] = .{ + .iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "", + .iov_len = f.lazy_db.fwd_decl.items.len, + }; + f.file_size += f.lazy_db.fwd_decl.items.len; + + f.all_buffers.items[lazy_indices + 1] = .{ + .iov_base = if (f.lazy_db.code.items.len > 0) f.lazy_db.code.items.ptr else "", + .iov_len = f.lazy_db.code.items.len, + }; + f.file_size += f.lazy_db.code.items.len; + } + f.all_buffers.items[ctypes_index] = .{ .iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "", .iov_len = f.ctypes_buf.items.len, @@ -318,17 +342,17 @@ const Flush = struct { ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{}, ctypes_buf: std.ArrayListUnmanaged(u8) = .{}, - err_decls: DeclBlock = .{}, - + lazy_db: DeclBlock = .{}, lazy_fns: LazyFns = .{}, asm_buf: std.ArrayListUnmanaged(u8) = .{}, + /// We collect a list of buffers to write, and write them all at once with pwritev 😎 all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{}, /// Keeps track of the total bytes of `all_buffers`. file_size: u64 = 0, - const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, DeclBlock); + const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void); fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void { if (buf.len == 0) return; @@ -338,10 +362,9 @@ const Flush = struct { fn deinit(f: *Flush, gpa: Allocator) void { f.all_buffers.deinit(gpa); - var lazy_fns_it = f.lazy_fns.valueIterator(); - while (lazy_fns_it.next()) |db| db.deinit(gpa); + f.asm_buf.deinit(gpa); f.lazy_fns.deinit(gpa); - f.err_decls.deinit(gpa); + f.lazy_db.deinit(gpa); f.ctypes_buf.deinit(gpa); f.ctypes_map.deinit(gpa); f.ctypes.deinit(gpa); @@ -353,26 +376,106 @@ const FlushDeclError = error{ OutOfMemory, }; -fn flushCTypes(self: *C, f: *Flush, ctypes: codegen.CType.Store) FlushDeclError!void { - _ = self; - _ = f; - _ = ctypes; +fn flushCTypes( + self: *C, + f: *Flush, + decl_index: Module.Decl.OptionalIndex, + decl_ctypes: codegen.CType.Store, +) FlushDeclError!void { + const gpa = self.base.allocator; + const mod = self.base.options.module.?; + + const decl_ctypes_len = decl_ctypes.count(); + f.ctypes_map.clearRetainingCapacity(); + try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len); + + var global_ctypes = f.ctypes.promote(gpa); + defer f.ctypes.demote(global_ctypes); + + var ctypes_buf = f.ctypes_buf.toManaged(gpa); + defer f.ctypes_buf = ctypes_buf.moveToUnmanaged(); + const writer = ctypes_buf.writer(); + + const slice = decl_ctypes.set.map.entries.slice(); + for (slice.items(.key), 0..) 
|decl_cty, decl_i| { + const Context = struct { + arena: Allocator, + ctypes_map: []codegen.CType.Index, + cached_hash: codegen.CType.Store.Set.Map.Hash, + idx: codegen.CType.Index, + + pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash { + return ctx.cached_hash; + } + pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool { + return lhs.eqlContext(rhs, ctx); + } + pub fn eqlIndex( + ctx: @This(), + lhs_idx: codegen.CType.Index, + rhs_idx: codegen.CType.Index, + ) bool { + if (lhs_idx < codegen.CType.Tag.no_payload_count or + rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx; + const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count; + if (lhs_i >= ctx.ctypes_map.len) return false; + return ctx.ctypes_map[lhs_i] == rhs_idx; + } + pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index { + if (idx < codegen.CType.Tag.no_payload_count) return idx; + return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count]; + } + }; + const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i); + const ctx = Context{ + .arena = global_ctypes.arena.allocator(), + .ctypes_map = f.ctypes_map.items, + .cached_hash = decl_ctypes.indexToHash(decl_idx), + .idx = decl_idx, + }; + const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{ + .store = &global_ctypes.set, + }); + const global_idx = + @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index); + f.ctypes_map.appendAssumeCapacity(global_idx); + if (!gop.found_existing) { + errdefer _ = global_ctypes.set.map.pop(); + gop.key_ptr.* = try decl_cty.copyContext(ctx); + } + if (std.debug.runtime_safety) { + const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index]; + assert(global_cty == gop.key_ptr); + assert(decl_cty.eqlContext(global_cty.*, ctx)); + assert(decl_cty.hash(decl_ctypes.set) == global_cty.hash(global_ctypes.set)); + } + try codegen.genTypeDecl( + mod, + writer, + global_ctypes.set, + global_idx, + decl_index, + decl_ctypes.set, + decl_idx, + gop.found_existing, + ); + } } -fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { +fn flushErrDecls(self: *C, db: *DeclBlock) FlushDeclError!void { const gpa = self.base.allocator; - const fwd_decl = &f.err_decls.fwd_decl; - const ctypes = &f.err_decls.ctypes; - const code = &f.err_decls.code; + const fwd_decl = &db.fwd_decl; + const ctypes = &db.ctypes; + const code = &db.code; var object = codegen.Object{ .dg = .{ .gpa = gpa, .module = self.base.options.module.?, .error_msg = null, - .decl_index = undefined, - .decl = undefined, + .decl_index = .none, + .decl = null, .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, }, @@ -394,19 +497,9 @@ fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); ctypes.* = object.dg.ctypes.move(); code.* = object.code.moveToUnmanaged(); - - try self.flushCTypes(f, ctypes.*); - try f.all_buffers.ensureUnusedCapacity(gpa, 2); - f.appendBufAssumeCapacity(fwd_decl.items); - f.appendBufAssumeCapacity(code.items); } -fn flushLazyFn( - self: *C, - f: *Flush, - db: *DeclBlock, - lazy_fn: codegen.LazyFnMap.Entry, -) FlushDeclError!void { +fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) FlushDeclError!void { const gpa = self.base.allocator; const fwd_decl = &db.fwd_decl; @@ -418,8 +511,8 @@ fn flushLazyFn( .gpa = gpa, .module = self.base.options.module.?, .error_msg = null, - .decl_index = 
undefined, - .decl = undefined, + .decl_index = .none, + .decl = null, .fwd_decl = fwd_decl.toManaged(gpa), .ctypes = ctypes.*, }, @@ -441,11 +534,6 @@ fn flushLazyFn( fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); ctypes.* = object.dg.ctypes.move(); code.* = object.code.moveToUnmanaged(); - - try self.flushCTypes(f, ctypes.*); - try f.all_buffers.ensureUnusedCapacity(gpa, 2); - f.appendBufAssumeCapacity(fwd_decl.items); - f.appendBufAssumeCapacity(code.items); } fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void { @@ -456,8 +544,8 @@ fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError while (it.next()) |entry| { const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*); if (gop.found_existing) continue; - gop.value_ptr.* = .{}; - try self.flushLazyFn(f, gop.value_ptr, entry); + gop.value_ptr.* = {}; + try self.flushLazyFn(&f.lazy_db, entry); } } @@ -481,7 +569,6 @@ fn flushDecl( const decl_block = self.decl_table.getPtr(decl_index).?; - try self.flushCTypes(f, decl_block.ctypes); try self.flushLazyFns(f, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) -- cgit v1.2.3 From 25a3c933b9d708d907293b1a46b6661641ccf9ea Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 21 Feb 2023 02:21:39 -0500 Subject: CBE: fix test failures --- lib/zig.h | 8 ++++---- src/codegen/c.zig | 25 ++++++++++++++----------- test/stage2/cbe.zig | 12 ++++++------ 3 files changed, 24 insertions(+), 21 deletions(-) (limited to 'src/codegen') diff --git a/lib/zig.h b/lib/zig.h index 5ee8e3dd76..9a5e751f79 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -288,13 +288,13 @@ typedef char bool; #endif #if __STDC_VERSION__ >= 201112L -#define zig_noreturn _Noreturn void +#define zig_noreturn _Noreturn #elif zig_has_attribute(noreturn) || defined(zig_gnuc) -#define zig_noreturn __attribute__((noreturn)) void +#define zig_noreturn __attribute__((noreturn)) #elif _MSC_VER -#define zig_noreturn __declspec(noreturn) void +#define zig_noreturn __declspec(noreturn) #else -#define zig_noreturn void +#define zig_noreturn #endif #define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 35c826e2d1..4ddfe3213a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1476,6 +1476,7 @@ pub const DeclGen = struct { } if (dg.decl.?.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); + if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, @@ -2289,7 +2290,7 @@ fn renderTypeSuffix( w, param_type, .suffix, - CQualifiers.init(.{}), + CQualifiers.init(.{ .@"const" = true }), ); try w.print("{}a{d}", .{ trailing, param_i }); try renderTypeSuffix(decl, store, mod, w, param_type, .suffix); @@ -5737,26 +5738,28 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(writer, local, .Other); - const array_len = f.air.typeOf(ty_op.operand).elemType().arrayLen(); + const array_ty = f.air.typeOf(ty_op.operand).childType(); - try writer.writeAll(".ptr = "); + try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); + try writer.writeAll(" = "); + // Unfortunately, C does not support any equivalent to + // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand 
== .undef) { - // Unfortunately, C does not support any equivalent to - // &(*(void *)p)[0], although LLVM does via GetElementPtr var buf: Type.SlicePtrFieldTypeBuffer = undefined; try f.writeCValue(writer, CValue{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); - } else { + } else if (array_ty.hasRuntimeBitsIgnoreComptime()) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); - } + } else try f.writeCValue(writer, operand, .Initializer); + try writer.writeAll("; "); + const array_len = array_ty.arrayLen(); var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; const len_val = Value.initPayload(&len_pl.base); - try writer.writeAll("; "); - try f.writeCValue(writer, local, .Other); - try writer.print(".len = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); + try f.writeCValueMember(writer, local, .{ .identifier = "len" }); + try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); + return local; } diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig index 6c0c5e03cf..e9750853a6 100644 --- a/test/stage2/cbe.zig +++ b/test/stage2/cbe.zig @@ -959,7 +959,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ _ = a; \\} , - \\zig_extern void start(zig_u8 const a0); + \\zig_extern void start(uint8_t const a0); \\ ); ctx.h("header with multiple param function", linux_x64, @@ -967,19 +967,19 @@ pub fn addCases(ctx: *TestContext) !void { \\ _ = a; _ = b; _ = c; \\} , - \\zig_extern void start(zig_u8 const a0, zig_u8 const a1, zig_u8 const a2); + \\zig_extern void start(uint8_t const a0, uint8_t const a1, uint8_t const a2); \\ ); ctx.h("header with u32 param function", linux_x64, \\export fn start(a: u32) void{ _ = a; } , - \\zig_extern void start(zig_u32 const a0); + \\zig_extern void start(uint32_t const a0); \\ ); ctx.h("header with usize param function", linux_x64, \\export fn start(a: usize) void{ _ = a; } , - \\zig_extern void start(zig_usize const a0); + \\zig_extern void start(uintptr_t const a0); \\ ); ctx.h("header with bool param function", linux_x64, @@ -993,7 +993,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ unreachable; \\} , - \\zig_extern zig_noreturn start(void); + \\zig_extern zig_noreturn void start(void); \\ ); ctx.h("header with multiple functions", linux_x64, @@ -1009,7 +1009,7 @@ pub fn addCases(ctx: *TestContext) !void { ctx.h("header with multiple includes", linux_x64, \\export fn start(a: u32, b: usize) void{ _ = a; _ = b; } , - \\zig_extern void start(zig_u32 const a0, zig_usize const a1); + \\zig_extern void start(uint32_t const a0, uintptr_t const a1); \\ ); } -- cgit v1.2.3 From 05da5b32a820c031001098034840940964f41a81 Mon Sep 17 00:00:00 2001 From: Isaac Freund Date: Mon, 20 Feb 2023 23:31:48 +0100 Subject: Sema: implement @fieldParentPtr for unions --- src/Sema.zig | 54 +++++++++------ src/arch/arm/CodeGen.zig | 5 ++ src/arch/wasm/CodeGen.zig | 4 +- src/codegen/c.zig | 6 ++ src/codegen/llvm.zig | 4 +- test/behavior/field_parent_ptr.zig | 81 ++++++++++++++++++++++ .../compile_errors/fieldParentPtr-non_struct.zig | 2 +- 7 files changed, 131 insertions(+), 25 deletions(-) (limited to 'src/codegen') diff --git a/src/Sema.zig b/src/Sema.zig index 41e5fdc20e..40a4a114b4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -21482,24 +21482,32 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const 
ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; - const struct_ty = try sema.resolveType(block, ty_src, extra.parent_type); + const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type); const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); - if (struct_ty.zigTypeTag() != .Struct) { - return sema.fail(block, ty_src, "expected struct type, found '{}'", .{struct_ty.fmt(sema.mod)}); + if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) { + return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)}); } - try sema.resolveTypeLayout(struct_ty); + try sema.resolveTypeLayout(parent_ty); - const field_index = if (struct_ty.isTuple()) blk: { - if (mem.eql(u8, field_name, "len")) { - return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); - } - break :blk try sema.tupleFieldIndex(block, struct_ty, field_name, name_src); - } else try sema.structFieldIndex(block, struct_ty, field_name, name_src); + const field_index = switch (parent_ty.zigTypeTag()) { + .Struct => blk: { + if (parent_ty.isTuple()) { + if (mem.eql(u8, field_name, "len")) { + return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); + } + break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src); + } else { + break :blk try sema.structFieldIndex(block, parent_ty, field_name, name_src); + } + }, + .Union => try sema.unionFieldIndex(block, parent_ty, field_name, name_src), + else => unreachable, + }; - if (struct_ty.structFieldIsComptime(field_index)) { + if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) { return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); } @@ -21507,23 +21515,29 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_ptr_ty_info = field_ptr_ty.ptrInfo().data; var ptr_ty_data: Type.Payload.Pointer.Data = .{ - .pointee_type = struct_ty.structFieldType(field_index), + .pointee_type = parent_ty.structFieldType(field_index), .mutable = field_ptr_ty_info.mutable, .@"addrspace" = field_ptr_ty_info.@"addrspace", }; - if (struct_ty.containerLayout() == .Packed) { - return sema.fail(block, src, "TODO handle packed structs with @fieldParentPtr", .{}); + if (parent_ty.containerLayout() == .Packed) { + return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{}); } else { - ptr_ty_data.@"align" = if (struct_ty.castTag(.@"struct")) |struct_obj| b: { - break :b struct_obj.data.fields.values()[field_index].abi_align; - } else 0; + ptr_ty_data.@"align" = blk: { + if (parent_ty.castTag(.@"struct")) |struct_obj| { + break :blk struct_obj.data.fields.values()[field_index].abi_align; + } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| { + break :blk union_obj.data.fields.values()[field_index].abi_align; + } else { + break :blk 0; + } + }; } const actual_field_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src); - ptr_ty_data.pointee_type = struct_ty; + ptr_ty_data.pointee_type = parent_ty; const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { @@ 
-21540,11 +21554,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr field_name, field_index, payload.data.field_index, - struct_ty.fmt(sema.mod), + parent_ty.fmt(sema.mod), }, ); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, struct_ty); + try sema.addDeclaredHereNote(msg, parent_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index fc89b2e26b..01a1d6b7eb 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2973,6 +2973,11 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = self.air.getRefType(ty_pl.ty).childType(); + + if (struct_ty.zigTypeTag() == .Union) { + return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); + } + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); switch (field_ptr) { .ptr_stack_offset => |off| { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b229a67e70..2f191fd834 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4944,8 +4944,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.field_ptr}); const field_ptr = try func.resolveInst(extra.field_ptr); - const struct_ty = func.air.getRefType(ty_pl.ty).childType(); - const field_offset = struct_ty.structFieldOffset(extra.field_index, func.target); + const parent_ty = func.air.getRefType(ty_pl.ty).childType(); + const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target); const result = if (field_offset != 0) result: { const base = try func.buildPointerOffset(field_ptr, 0, .new); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 0beb00b236..399549da3a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -5367,12 +5367,18 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { } const struct_ptr_ty = f.air.typeOfIndex(inst); + const field_ptr_ty = f.air.typeOf(extra.field_ptr); const field_ptr_val = try f.resolveInst(extra.field_ptr); try reap(f, inst, &.{extra.field_ptr}); const target = f.object.dg.module.getTarget(); const struct_ty = struct_ptr_ty.childType(); + + if (struct_ty.zigTypeTag() == .Union) { + return f.fail("TODO: CBE: @fieldParentPtr for unions", .{}); + } + const field_offset = struct_ty.structFieldOffset(extra.field_index, target); var field_offset_pl = Value.Payload.I64{ diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index aa794827a8..558556f108 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -6020,8 +6020,8 @@ pub const FuncGen = struct { const field_ptr = try self.resolveInst(extra.field_ptr); const target = self.dg.module.getTarget(); - const struct_ty = self.air.getRefType(ty_pl.ty).childType(); - const field_offset = struct_ty.structFieldOffset(extra.field_index, target); + const parent_ty = self.air.getRefType(ty_pl.ty).childType(); + const field_offset = parent_ty.structFieldOffset(extra.field_index, target); const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); if (field_offset == 0) { diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index 14a2362f5d..b9e171db57 100644 --- a/test/behavior/field_parent_ptr.zig +++ 
b/test/behavior/field_parent_ptr.zig @@ -44,3 +44,84 @@ fn testParentFieldPtrFirst(a: *const bool) !void { try expect(base == &foo); try expect(&base.a == a); } + +test "@fieldParentPtr untagged union" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + try testFieldParentPtrUnion(&bar.c); + comptime try testFieldParentPtrUnion(&bar.c); +} + +const Bar = union(enum) { + a: bool, + b: f32, + c: i32, + d: i32, +}; + +const bar = Bar{ .c = 42 }; + +fn testFieldParentPtrUnion(c: *const i32) !void { + try expect(c == &bar.c); + + const base = @fieldParentPtr(Bar, "c", c); + try expect(base == &bar); + try expect(&base.c == c); +} + +test "@fieldParentPtr tagged union" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + try testFieldParentPtrTaggedUnion(&bar_tagged.c); + comptime try testFieldParentPtrTaggedUnion(&bar_tagged.c); +} + +const BarTagged = union(enum) { + a: bool, + b: f32, + c: i32, + d: i32, +}; + +const bar_tagged = BarTagged{ .c = 42 }; + +fn testFieldParentPtrTaggedUnion(c: *const i32) !void { + try expect(c == &bar_tagged.c); + + const base = @fieldParentPtr(BarTagged, "c", c); + try expect(base == &bar_tagged); + try expect(&base.c == c); +} + +test "@fieldParentPtr extern union" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + try testFieldParentPtrExternUnion(&bar_extern.c); + comptime try testFieldParentPtrExternUnion(&bar_extern.c); +} + +const BarExtern = extern union { + a: bool, + b: f32, + c: i32, + d: i32, +}; + +const bar_extern = BarExtern{ .c = 42 }; + +fn testFieldParentPtrExternUnion(c: *const i32) !void { + try expect(c == &bar_extern.c); + + const base = @fieldParentPtr(BarExtern, "c", c); + try expect(base == &bar_extern); + try expect(&base.c == c); +} diff --git a/test/cases/compile_errors/fieldParentPtr-non_struct.zig b/test/cases/compile_errors/fieldParentPtr-non_struct.zig index 0a2f46e5c9..7950c88537 100644 --- a/test/cases/compile_errors/fieldParentPtr-non_struct.zig +++ b/test/cases/compile_errors/fieldParentPtr-non_struct.zig @@ -7,4 +7,4 @@ export fn foo(a: *i32) *Foo { // backend=llvm // target=native // -// :3:28: error: expected struct type, found 'i32' +// :3:28: error: expected struct or union type, found 'i32' -- cgit v1.2.3 From 248fb40dcc5eb50cf19e711197c5d1b210abf1b3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 21 Feb 2023 15:05:41 -0500 Subject: CBE: fix windows test failures --- lib/zig.h | 131 +++++++++++++++++++++++++------------------------ src/codegen/c.zig | 78 +++++++++++++++-------------- src/codegen/c/type.zig | 9 +++- test/behavior/asm.zig | 13 +++++ 4 files changed, 129 insertions(+), 102 deletions(-) (limited to 'src/codegen') diff --git a/lib/zig.h b/lib/zig.h index 9a5e751f79..d336ecb2e2 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -316,7 +316,7 @@ 
zig_extern void *memset (void *, int, size_t); /* ===================== 8/16/32/64-bit Integer Support ===================== */ -#if __STDC_VERSION__ >= 199901L +#if __STDC_VERSION__ >= 199901L || _MSC_VER #include <stdint.h> #else @@ -1923,6 +1923,7 @@ typedef double zig_f16; #define zig_make_f16(fp, repr) fp #elif LDBL_MANT_DIG == 11 #define zig_bitSizeOf_c_longdouble 16 +typedef uint16_t zig_repr_c_longdouble; typedef long double zig_f16; #define zig_make_f16(fp, repr) fp##l #elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc)) @@ -1959,6 +1960,7 @@ typedef double zig_f32; #define zig_make_f32(fp, repr) fp #elif LDBL_MANT_DIG == 24 #define zig_bitSizeOf_c_longdouble 32 +typedef uint32_t zig_repr_c_longdouble; typedef long double zig_f32; #define zig_make_f32(fp, repr) fp##l #elif FLT32_MANT_DIG == 24 @@ -1982,6 +1984,7 @@ typedef int32_t zig_f32; #if _MSC_VER #ifdef ZIG_TARGET_ABI_MSVC #define zig_bitSizeOf_c_longdouble 64 +typedef uint64_t zig_repr_c_longdouble; #endif #define zig_make_special_constant_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, ) #else /* _MSC_VER */ @@ -1995,6 +1998,7 @@ typedef double zig_f64; #define zig_make_f64(fp, repr) fp #elif LDBL_MANT_DIG == 53 #define zig_bitSizeOf_c_longdouble 64 +typedef uint64_t zig_repr_c_longdouble; typedef long double zig_f64; #define zig_make_f64(fp, repr) fp##l #elif FLT64_MANT_DIG == 53 @@ -2027,6 +2031,7 @@ typedef double zig_f80; #define zig_make_f80(fp, repr) fp #elif LDBL_MANT_DIG == 64 #define zig_bitSizeOf_c_longdouble 80 +typedef zig_u128 zig_repr_c_longdouble; typedef long double zig_f80; #define zig_make_f80(fp, repr) fp##l #elif FLT80_MANT_DIG == 64 @@ -2062,6 +2067,7 @@ typedef double zig_f128; #define zig_make_f128(fp, repr) fp #elif LDBL_MANT_DIG == 113 #define zig_bitSizeOf_c_longdouble 128 +typedef zig_u128 zig_repr_c_longdouble; typedef long double zig_f128; #define zig_make_f128(fp, repr) fp##l #elif FLT128_MANT_DIG == 113 @@ -2099,9 +2105,10 @@ typedef zig_i128 zig_f128; #ifdef zig_bitSizeOf_c_longdouble #ifdef ZIG_TARGET_ABI_MSVC -typedef double zig_c_longdouble; #undef zig_bitSizeOf_c_longdouble #define zig_bitSizeOf_c_longdouble 64 +typedef uint64_t zig_repr_c_longdouble; +typedef zig_f64 zig_c_longdouble; #define zig_make_c_longdouble(fp, repr) fp #else typedef long double zig_c_longdouble; @@ -2113,6 +2120,7 @@ typedef long double zig_c_longdouble; #undef zig_has_c_longdouble #define zig_has_c_longdouble 0 #define zig_bitSizeOf_c_longdouble 80 +typedef zig_u128 zig_repr_c_longdouble; #define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80 #define zig_bitSizeOf_repr_c_longdouble 128 typedef zig_i128 zig_c_longdouble; @@ -2126,21 +2134,18 @@ typedef zig_i128 zig_c_longdouble; #if !zig_has_float_builtins #define zig_float_from_repr(Type, ReprType) \ - static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \ - return *((zig_##Type*)&repr); \ + static inline zig_##Type zig_float_from_repr_##Type(ReprType repr) { \ + zig_##Type result; \ + memcpy(&result, &repr, sizeof(result)); \ + return result; \ } -zig_float_from_repr(f16, u16) -zig_float_from_repr(f32, u32) -zig_float_from_repr(f64, u64) -zig_float_from_repr(f80, u128) -zig_float_from_repr(f128, u128) -#if zig_bitSizeOf_c_longdouble == 80 -zig_float_from_repr(c_longdouble, u128) -#else -#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType) -zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble)) -#endif
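/* An illustrative stand-alone expansion of the rewritten macro above
 * (hypothetical name; assumes <stdint.h> and <string.h> are available):
 * reading a float through a cast integer pointer, as the old definition
 * did, violates C strict-aliasing rules, while memcpy of the object
 * representation is well defined and compiles down to a plain move. */
static inline float example_float_from_bits(uint32_t bits) {
    float result;
    memcpy(&result, &bits, sizeof(result)); /* bit-exact, aliasing-safe */
    return result;
}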
+zig_float_from_repr(f16, uint16_t) +zig_float_from_repr(f32, uint32_t) +zig_float_from_repr(f64, uint64_t) +zig_float_from_repr(f80, zig_u128) +zig_float_from_repr(f128, zig_u128) +zig_float_from_repr(c_longdouble, zig_repr_c_longdouble) #endif #define zig_cast_f16 (zig_f16) @@ -2288,98 +2293,98 @@ zig_float_builtins(c_longdouble) // TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 -#define zig_msvc_atomics(Type, suffix) \ - static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ - zig_##Type comparand = *expected; \ - zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ +#define zig_msvc_atomics(ZigType, Type, suffix) \ + static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \ + Type comparand = *expected; \ + Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ bool exchanged = initial == comparand; \ if (!exchanged) { \ *expected = initial; \ } \ return exchanged; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedExchange##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedExchangeAdd##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = prev - value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedOr##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedXor##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedAnd##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = ~(prev & value); \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - 
zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = value < prev ? value : prev; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = value > prev ? value : prev; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \ _InterlockedExchange##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \ + static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \ return _InterlockedOr##suffix(obj, 0); \ } -zig_msvc_atomics(u8, 8) -zig_msvc_atomics(i8, 8) -zig_msvc_atomics(u16, 16) -zig_msvc_atomics(i16, 16) -zig_msvc_atomics(u32, ) -zig_msvc_atomics(i32, ) +zig_msvc_atomics( u8, uint8_t, 8) +zig_msvc_atomics( i8, int8_t, 8) +zig_msvc_atomics(u16, uint16_t, 16) +zig_msvc_atomics(i16, int16_t, 16) +zig_msvc_atomics(u32, uint32_t, ) +zig_msvc_atomics(i32, int32_t, ) #if _M_X64 -zig_msvc_atomics(u64, 64) -zig_msvc_atomics(i64, 64) +zig_msvc_atomics(u64, uint64_t, 64) +zig_msvc_atomics(i64, int64_t, 64) #endif #define zig_msvc_flt_atomics(Type, ReprType, suffix) \ static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ - zig_##ReprType comparand = *((zig_##ReprType*)expected); \ - zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \ + ReprType comparand = *((ReprType*)expected); \ + ReprType initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, *((ReprType*)&desired), comparand); \ bool exchanged = initial == comparand; \ if (!exchanged) { \ *expected = *((zig_##Type*)&initial); \ @@ -2387,35 +2392,35 @@ zig_msvc_atomics(i64, 64) return exchanged; \ } \ static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \ + ReprType initial = _InterlockedExchange##suffix((ReprType volatile*)obj, *((ReprType*)&value)); \ return *((zig_##Type*)&initial); \ } \ static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ bool success = false; \ - zig_##ReprType new; \ + ReprType new; \ zig_##Type prev; \ while (!success) { \ prev = *obj; \ new = prev + value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \ } \ return prev; \ } \ static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ bool success = false; \ - zig_##ReprType new; \ + ReprType new; \ zig_##Type prev; \ while (!success) { \ prev = *obj; \ new = prev - value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + success = 
zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \ } \ return prev; \ } -zig_msvc_flt_atomics(f32, u32, ) +zig_msvc_flt_atomics(f32, uint32_t, ) #if _M_X64 -zig_msvc_flt_atomics(f64, u64, 64) +zig_msvc_flt_atomics(f64, uint64_t, 64) #endif #if _M_IX86 @@ -2426,11 +2431,11 @@ static inline void zig_msvc_atomic_barrier() { } } -static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, uint32_t* arg) { +static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, void* arg) { return _InterlockedExchangePointer(obj, arg); } -static inline void zig_msvc_atomic_store_p32(void** obj, uint32_t* arg) { +static inline void zig_msvc_atomic_store_p32(void** obj, void* arg) { _InterlockedExchangePointer(obj, arg); } @@ -2448,11 +2453,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir return exchanged; } #else /* _M_IX86 */ -static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, uint64_t* arg) { +static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, void* arg) { return _InterlockedExchangePointer(obj, arg); } -static inline void zig_msvc_atomic_store_p64(void** obj, uint64_t* arg) { +static inline void zig_msvc_atomic_store_p64(void** obj, void* arg) { _InterlockedExchangePointer(obj, arg); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4ddfe3213a..1db4fc5d51 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2058,6 +2058,7 @@ fn renderTypePrefix( .zig_f64, .zig_f80, .zig_f128, + .zig_c_longdouble, => |tag| try w.writeAll(@tagName(tag)), .pointer, @@ -2225,6 +2226,7 @@ fn renderTypeSuffix( .zig_f64, .zig_f80, .zig_f128, + .zig_c_longdouble, => {}, .pointer, @@ -3062,6 +3064,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { return CValue.none; } + const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(bin_op.lhs); const child_ty = ptr_ty.childType(); @@ -3076,7 +3079,9 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = &("); + try writer.writeAll(" = ("); + try f.renderTypecast(writer, inst_ty); + try writer.writeAll(")&("); if (ptr_ty.ptrSize() == .One) { // It's a pointer to an array, so we need to de-reference. try f.writeCValueDeref(writer, ptr); @@ -3902,32 +3907,31 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const elem_ty = switch (inst_ty.ptrSize()) { - .One => blk: { - const array_ty = inst_ty.childType(); - break :blk array_ty.childType(); - }, - else => inst_ty.childType(), - }; + const elem_ty = inst_ty.elemType2(); - // We must convert to and from integer types to prevent UB if the operation - // results in a NULL pointer, or if LHS is NULL. The operation is only UB - // if the result is NULL and then dereferenced. 
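// For illustration only (assuming a `uint32_t *` operand `p`, offset `n`,
// and operator `+`), the branch below renders roughly
// `l = (uint32_t *)(((uintptr_t)p) + (n*sizeof(uint32_t)));`, while the
// zero-bit-element fallback renders a plain `l = p;`, since there is no
// element size to scale by.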
const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); - try writer.writeAll(")(((uintptr_t)"); - try f.writeCValue(writer, lhs, .Other); - try writer.writeAll(") "); - try writer.writeByte(operator); - try writer.writeAll(" ("); - try f.writeCValue(writer, rhs, .Other); - try writer.writeAll("*sizeof("); - try f.renderTypecast(writer, elem_ty); - try writer.writeAll(")));\n"); + try writer.writeAll(" = "); + + if (elem_ty.hasRuntimeBitsIgnoreComptime()) { + // We must convert to and from integer types to prevent UB if the operation + // results in a NULL pointer, or if LHS is NULL. The operation is only UB + // if the result is NULL and then dereferenced. + try writer.writeByte('('); + try f.renderTypecast(writer, inst_ty); + try writer.writeAll(")(((uintptr_t)"); + try f.writeCValue(writer, lhs, .Other); + try writer.writeAll(") "); + try writer.writeByte(operator); + try writer.writeAll(" ("); + try f.writeCValue(writer, rhs, .Other); + try writer.writeAll("*sizeof("); + try f.renderTypecast(writer, elem_ty); + try writer.writeAll(")))"); + } else try f.writeCValue(writer, lhs, .Initializer); + try writer.writeAll(";\n"); return local; } @@ -5264,21 +5268,21 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc else => unreachable, }; - try writer.writeByte('&'); - switch (field_loc) { - .begin, .end => { - try writer.writeByte('('); - try f.writeCValue(writer, struct_ptr, .Other); - try writer.print(")[{}]", .{ - @boolToInt(field_loc == .end and struct_ty.hasRuntimeBitsIgnoreComptime()), - }); - }, - .field => |field| if (extra_name != .none) { - try f.writeCValueDerefMember(writer, struct_ptr, extra_name); - try writer.writeByte('.'); - try f.writeCValue(writer, field, .Other); - } else try f.writeCValueDerefMember(writer, struct_ptr, field), - } + if (struct_ty.hasRuntimeBitsIgnoreComptime()) { + try writer.writeByte('&'); + switch (field_loc) { + .begin, .end => { + try writer.writeByte('('); + try f.writeCValue(writer, struct_ptr, .Other); + try writer.print(")[{}]", .{@boolToInt(field_loc == .end)}); + }, + .field => |field| if (extra_name != .none) { + try f.writeCValueDerefMember(writer, struct_ptr, extra_name); + try writer.writeByte('.'); + try f.writeCValue(writer, field, .Other); + } else try f.writeCValueDerefMember(writer, struct_ptr, field), + } + } else try f.writeCValue(writer, struct_ptr, .Other); try writer.writeAll(";\n"); return local; } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index d6424b1f27..bf148a8b87 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -102,6 +102,7 @@ pub const CType = extern union { zig_f64, zig_f80, zig_f128, + zig_c_longdouble, // Keep last_no_payload_tag updated! // After this, the tag requires a payload. 
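// A payload-free tag doubles as its own `CType.Index`: any index below
// `no_payload_count` is interpreted as the tag value itself, with no
// backing payload allocation. That is why a new payload-free tag such as
// `zig_c_longdouble` above must stay in this first group and be reflected
// in `last_no_payload_tag` below.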
pointer, @@ -127,7 +128,7 @@ pub const CType = extern union { function, varargs_function, - pub const last_no_payload_tag = Tag.zig_f128; + pub const last_no_payload_tag = Tag.zig_c_longdouble; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; pub fn hasPayload(self: Tag) bool { @@ -177,6 +178,7 @@ pub const CType = extern union { .zig_f64, .zig_f80, .zig_f128, + .zig_c_longdouble, => @compileError("Type Tag " ++ @tagName(self) ++ " has no payload"), .pointer, @@ -557,6 +559,7 @@ pub const CType = extern union { .zig_f64, .zig_f80, .zig_f128, + .zig_c_longdouble, => false, .pointer, @@ -674,6 +677,7 @@ pub const CType = extern union { .zig_f64, .zig_f80, .zig_f128, + .zig_c_longdouble, => {}, .pointer, @@ -980,7 +984,7 @@ pub const CType = extern union { .f64 => .zig_f64, .f80 => .zig_f80, .f128 => .zig_f128, - .c_longdouble => .@"long double", + .c_longdouble => .zig_c_longdouble, else => unreachable, }), @@ -1374,6 +1378,7 @@ pub const CType = extern union { .zig_f64, .zig_f80, .zig_f128, + .zig_c_longdouble, => return self, .pointer, diff --git a/test/behavior/asm.zig b/test/behavior/asm.zig index e9a01226b1..b242374ef8 100644 --- a/test/behavior/asm.zig +++ b/test/behavior/asm.zig @@ -7,6 +7,7 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux comptime { if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_aarch64 and + !(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly is_x86_64_linux) { asm ( @@ -24,6 +25,8 @@ test "module level assembly" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + if (is_x86_64_linux) { try expect(this_is_my_alias() == 1234); } @@ -36,6 +39,8 @@ test "output constraint modifiers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + // This is only testing compilation. var a: u32 = 3; asm volatile ("" @@ -57,6 +62,8 @@ test "alternative constraints" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + // Make sure we allow commas as a separator for alternative constraints. 
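// (A comma in a constraint string such as "r,m" separates GCC-style
// alternatives, letting the compiler satisfy the operand with either a
// register or a memory slot; the empty asm template below only exercises
// that such strings are parsed and forwarded by the backend.)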
var a: u32 = 3; asm volatile ("" @@ -73,6 +80,8 @@ test "sized integer/float in asm input" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + asm volatile ("" : : [_] "m" (@as(usize, 3)), @@ -122,6 +131,8 @@ test "struct/array/union types as input values" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + asm volatile ("" : : [_] "m" (@as([1]u32, undefined)), @@ -146,6 +157,8 @@ test "asm modifiers (AArch64)" { if (builtin.target.cpu.arch != .aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + var x: u32 = 15; const double = asm ("add %[ret:w], %[in:w], %[in:w]" : [ret] "=r" (-> u32), -- cgit v1.2.3 From bdb1e014a085fdd872886dddc66cfe1081887e5c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 22 Feb 2023 23:31:41 -0500 Subject: CBE: cleanup field access * Implement @fieldParentPtr on a union * Refactor field access to ensure that it is handled consistently * Remove `renderTypecast` as it now behaves the same as `renderType` --- src/codegen/c.zig | 609 +++++++++++++++++++------------------ test/behavior/field_parent_ptr.zig | 3 - test/behavior/packed-struct.zig | 1 - 3 files changed, 314 insertions(+), 299 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index bfc6e6451b..b5269acc5c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -39,7 +39,7 @@ pub const CValue = union(enum) { constant: Air.Inst.Ref, /// Index into the parameters arg: usize, - /// The payload field of a parameter + /// The array field of a parameter arg_array: usize, /// Index into a tuple's fields field: usize, @@ -50,6 +50,8 @@ pub const CValue = union(enum) { undef: Type, /// Render the slice as an identifier (using fmtIdent) identifier: []const u8, + /// Render the slice as a payload.identifier (using fmtIdent) + payload_identifier: []const u8, /// Render these bytes literally.
/// TODO make this a [*:0]const u8 to save memory bytes: []const u8, @@ -416,12 +418,20 @@ pub const Function = struct { return f.object.dg.fail(format, args); } - fn renderType(f: *Function, w: anytype, t: Type) !void { - return f.object.dg.renderType(w, t, .Complete); + fn indexToCType(f: *Function, idx: CType.Index) CType { + return f.object.dg.indexToCType(idx); + } + + fn typeToIndex(f: *Function, ty: Type, kind: CType.Kind) !CType.Index { + return f.object.dg.typeToIndex(ty, kind); + } + + fn typeToCType(f: *Function, ty: Type, kind: CType.Kind) !CType { + return f.object.dg.typeToCType(ty, kind); } - fn renderTypecast(f: *Function, w: anytype, t: Type) !void { - return f.object.dg.renderTypecast(w, t); + fn renderType(f: *Function, w: anytype, t: Type) !void { + return f.object.dg.renderType(w, t); } fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, src_ty: Type, location: ValueRenderLocation) !void { @@ -532,7 +542,7 @@ pub const DeclGen = struct { try writer.writeByte('{'); } else { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeAll("){ .ptr = "); } @@ -559,7 +569,7 @@ pub const DeclGen = struct { const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module); if (need_typecast) { try writer.writeAll("(("); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } try writer.writeByte('&'); @@ -574,7 +584,7 @@ pub const DeclGen = struct { fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { if (!ptr_ty.isSlice()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ptr_ty); + try dg.renderType(writer, ptr_ty); try writer.writeByte(')'); } switch (ptr_val.tag()) { @@ -589,90 +599,71 @@ pub const DeclGen = struct { try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); }, .field_ptr => { - const ptr_info = ptr_ty.ptrInfo(); + const target = dg.module.getTarget(); const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const container_ty = field_ptr.container_ty; - const index = field_ptr.field_index; - - var container_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = field_ptr.container_ty, - }; - const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base); - - const FieldInfo = struct { name: []const u8, ty: Type }; - const field_info: FieldInfo = switch (container_ty.zigTypeTag()) { - .Struct => switch (container_ty.containerLayout()) { - .Auto, .Extern => FieldInfo{ - .name = container_ty.structFields().keys()[index], - .ty = container_ty.structFields().values()[index].ty, - }, - .Packed => if (ptr_info.data.host_size == 0) { - const target = dg.module.getTarget(); - - const byte_offset = container_ty.packedStructFieldByteOffset(index, target); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); - - var u8_ptr_pl = ptr_info; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - - try writer.writeAll("&(("); - try dg.renderTypecast(writer, u8_ptr_ty); - try writer.writeByte(')'); - try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location); - return writer.print(")[{}]", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)}); - } else { - var host_pl = Type.Payload.Bits{ - .base = .{ 
.tag = .int_unsigned }, - .data = ptr_info.data.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); - try writer.writeByte('('); - try dg.renderTypecast(writer, ptr_ty); - try writer.writeByte(')'); - return dg.renderParentPtr(writer, field_ptr.container_ptr, host_ty, location); - }, - }, - .Union => switch (container_ty.containerLayout()) { - .Auto, .Extern => FieldInfo{ - .name = container_ty.unionFields().keys()[index], - .ty = container_ty.unionFields().values()[index].ty, - }, - .Packed => { - return dg.renderParentPtr(writer, field_ptr.container_ptr, ptr_ty, location); - }, + // Ensure complete type definition is visible before accessing fields. + _ = try dg.typeToIndex(field_ptr.container_ty, .complete); + + var container_ptr_pl = ptr_ty.ptrInfo(); + container_ptr_pl.data.pointee_type = field_ptr.container_ty; + const container_ptr_ty = Type.initPayload(&container_ptr_pl.base); + + switch (fieldLocation( + field_ptr.container_ty, + ptr_ty, + @intCast(u32, field_ptr.field_index), + target, + )) { + .begin => try dg.renderParentPtr( + writer, + field_ptr.container_ptr, + container_ptr_ty, + location, + ), + .field => |field| { + try writer.writeAll("&("); + try dg.renderParentPtr( + writer, + field_ptr.container_ptr, + container_ptr_ty, + location, + ); + try writer.writeAll(")->"); + try dg.writeCValue(writer, field); }, - .Pointer => field_info: { - assert(container_ty.isSlice()); - break :field_info switch (index) { - 0 => FieldInfo{ .name = "ptr", .ty = container_ty.childType() }, - 1 => FieldInfo{ .name = "len", .ty = Type.usize }, - else => unreachable, + .byte_offset => |byte_offset| { + var u8_ptr_pl = ptr_ty.ptrInfo(); + u8_ptr_pl.data.pointee_type = Type.u8; + const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + + var byte_offset_pl = Value.Payload.U64{ + .base = .{ .tag = .int_u64 }, + .data = byte_offset, }; - }, - else => unreachable, - }; - - if (field_info.ty.hasRuntimeBitsIgnoreComptime()) { - // Ensure complete type definition is visible before accessing fields. - try dg.renderType(std.io.null_writer, field_ptr.container_ty, .Complete); + const byte_offset_val = Value.initPayload(&byte_offset_pl.base); - try writer.writeAll("&("); - try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location); - try writer.writeAll(")->"); - switch (field_ptr.container_ty.tag()) { - .union_tagged, .union_safety_tagged => try writer.writeAll("payload."), - else => {}, - } - try writer.print("{ }", .{fmtIdent(field_info.name)}); - } else { - try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location); + try writer.writeAll("(("); + try dg.renderType(writer, u8_ptr_ty); + try writer.writeByte(')'); + try dg.renderParentPtr( + writer, + field_ptr.container_ptr, + container_ptr_ty, + location, + ); + try writer.print(" + {})", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)}); + }, + .end => { + try writer.writeAll("(("); + try dg.renderParentPtr( + writer, + field_ptr.container_ptr, + container_ptr_ty, + location, + ); + try writer.print(") + {})", .{try dg.fmtIntLiteral(Type.usize, Value.one)}); + }, } }, .elem_ptr => { @@ -696,7 +687,7 @@ pub const DeclGen = struct { const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base); // Ensure complete type definition is visible before accessing fields. 
- try dg.renderType(std.io.null_writer, payload_ptr.container_ty, .Complete); + _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); try writer.writeAll("&("); try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location); @@ -763,18 +754,18 @@ pub const DeclGen = struct { .Pointer => if (ty.isSlice()) { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } try writer.writeAll("{("); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&buf); - try dg.renderTypecast(writer, ptr_ty); + try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val)}); } else { try writer.writeAll("(("); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)}); }, .Optional => { @@ -791,7 +782,7 @@ pub const DeclGen = struct { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -805,7 +796,7 @@ pub const DeclGen = struct { .Auto, .Extern => { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -827,7 +818,7 @@ pub const DeclGen = struct { .Union => { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -852,7 +843,7 @@ pub const DeclGen = struct { .ErrorUnion => { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -865,7 +856,7 @@ pub const DeclGen = struct { .Array, .Vector => { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1026,7 +1017,7 @@ pub const DeclGen = struct { return dg.renderValue(writer, ty, slice_val, location); } else { try writer.writeAll("(("); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeAll(")NULL)"); }, .variable => { @@ -1036,7 +1027,7 @@ pub const DeclGen = struct { .slice => { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1059,7 +1050,7 @@ pub const DeclGen = struct { }, .int_u64, .one => { try writer.writeAll("(("); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)}); }, .field_ptr, @@ -1074,7 +1065,7 @@ pub const DeclGen = struct { .Array, .Vector => { if (location == .FunctionArgument) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1177,7 +1168,7 @@ pub const DeclGen = struct { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1211,7 +1202,7 @@ pub const DeclGen = struct { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1275,7 +1266,7 @@ pub const DeclGen = struct { if 
(!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1362,7 +1353,7 @@ pub const DeclGen = struct { if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); if (bit_offset_val_pl.data != 0) { @@ -1385,7 +1376,7 @@ pub const DeclGen = struct { if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } @@ -1396,11 +1387,11 @@ pub const DeclGen = struct { if (field_ty.hasRuntimeBits()) { if (field_ty.isPtrAtRuntime()) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } else if (field_ty.zigTypeTag() == .Float) { try writer.writeByte('('); - try dg.renderTypecast(writer, ty); + try dg.renderType(writer, ty); try writer.writeByte(')'); } try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); @@ -1509,9 +1500,11 @@ pub const DeclGen = struct { fn indexToCType(dg: *DeclGen, idx: CType.Index) CType { return dg.ctypes.indexToCType(idx); } + fn typeToIndex(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType.Index { return dg.ctypes.typeToIndex(dg.gpa, ty, dg.module, kind); } + fn typeToCType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType { return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind); } @@ -1524,16 +1517,10 @@ pub const DeclGen = struct { /// There are three type formats in total that we support rendering: /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | /// |---------------------|-----------------|---------------------| - /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// - fn renderType( - dg: *DeclGen, - w: anytype, - t: Type, - _: TypedefKind, - ) error{ OutOfMemory, AnalysisFail }!void { + fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; const module = dg.module; const idx = try dg.typeToIndex(t, .complete); @@ -1603,12 +1590,12 @@ pub const DeclGen = struct { if (needs_cast) { try w.writeByte('('); - try dg.renderTypecast(w, dest_ty); + try dg.renderType(w, dest_ty); try w.writeByte(')'); } if (src_is_ptr) { try w.writeByte('('); - try dg.renderTypecast(w, src_eff_ty); + try dg.renderType(w, src_eff_ty); try w.writeByte(')'); } try context.writeValue(dg, w, src_ty, location); @@ -1625,7 +1612,7 @@ pub const DeclGen = struct { try w.writeAll("(0, "); // TODO: Should the 0 go through fmtIntLiteral? if (src_is_ptr) { try w.writeByte('('); - try dg.renderTypecast(w, src_eff_ty); + try dg.renderType(w, src_eff_ty); try w.writeByte(')'); } try context.writeValue(dg, w, src_ty, .FunctionArgument); @@ -1646,28 +1633,11 @@ pub const DeclGen = struct { } } - /// Renders a type in C typecast format. - /// - /// This is guaranteed to be valid in a typecast expression, but not - /// necessarily in a variable/field declaration. 
- /// - /// There are three type formats in total that we support rendering: - /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | - /// |---------------------|-----------------|---------------------| - /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | - /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | - /// - fn renderTypecast(dg: *DeclGen, w: anytype, ty: Type) error{ OutOfMemory, AnalysisFail }!void { - try dg.renderType(w, ty, undefined); - } - /// Renders a type and name in field declaration/definition format. /// /// There are three type formats in total that we support rendering: /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | /// |---------------------|-----------------|---------------------| - /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// @@ -1708,7 +1678,7 @@ pub const DeclGen = struct { const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); try w.writeAll("static "); - try dg.renderType(w, name_slice_ty, .Complete); + try dg.renderType(w, name_slice_ty); try w.writeByte(' '); try w.writeAll(fn_name); try w.writeByte('('); @@ -1742,7 +1712,7 @@ pub const DeclGen = struct { try w.writeAll(" = "); try dg.renderValue(w, name_ty, name_val, .Initializer); try w.writeAll(";\n return ("); - try dg.renderTypecast(w, name_slice_ty); + try dg.renderType(w, name_slice_ty); try w.print("){{{}, {}}};\n", .{ fmtIdent("name"), try dg.fmtIntLiteral(Type.usize, len_val), }); @@ -1787,6 +1757,10 @@ pub const DeclGen = struct { }, .undef => |ty| return dg.renderValue(w, ty, Value.undef, .Other), .identifier => |ident| return w.print("{ }", .{fmtIdent(ident)}), + .payload_identifier => |ident| return w.print("{ }.{ }", .{ + fmtIdent("payload"), + fmtIdent(ident), + }), .bytes => |bytes| return w.writeAll(bytes), } } @@ -1812,6 +1786,10 @@ pub const DeclGen = struct { .decl_ref => |decl| return dg.renderDeclName(w, decl, 0), .undef => unreachable, .identifier => |ident| return w.print("(*{ })", .{fmtIdent(ident)}), + .payload_identifier => |ident| return w.print("(*{ }.{ })", .{ + fmtIdent("payload"), + fmtIdent(ident), + }), .bytes => |bytes| { try w.writeAll("(*"); try w.writeAll(bytes); @@ -1829,7 +1807,7 @@ pub const DeclGen = struct { fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .none, .constant, .field, .undef => unreachable, - .new_local, .local, .arg, .arg_array, .decl, .identifier, .bytes => { + .new_local, .local, .arg, .arg_array, .decl, .identifier, .payload_identifier, .bytes => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, @@ -3048,7 +3026,7 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(']'); if (is_array) { try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("))"); } try writer.writeAll(";\n"); @@ -3080,7 +3058,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll(")&("); if (ptr_ty.ptrSize() == .One) { // It's a pointer to an array, so we need to de-reference. 
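The headline change of this patch is @fieldParentPtr support for unions: the TODO failure in airFieldParentPtr is replaced by an offsetof-based pointer adjustment that reuses the same fieldLocation logic as ordinary field pointers. A minimal Zig sketch of what this enables; the names U and parentOfB and the field layout are illustrative, in the style of the field_parent_ptr.zig tests re-enabled at the end of this patch:

    const U = union {
        a: bool,
        b: i32,
    };

    fn parentOfB(b_ptr: *const i32) *const U {
        return @fieldParentPtr(U, "b", b_ptr);
    }

For auto and extern layout unions this now lowers to a subtraction of offsetof(container, field) from the field pointer in the emitted C, instead of failing to compile.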
@@ -3128,7 +3106,7 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(']'); if (is_array) { try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("))"); } try writer.writeAll(";\n"); @@ -3197,7 +3175,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(']'); if (is_array) { try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("))"); } try writer.writeAll(";\n"); @@ -3240,11 +3218,11 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); - const inst_cty = try f.object.dg.typeToIndex(inst_ty, .parameter); + const inst_cty = try f.typeToIndex(inst_ty, .parameter); const i = f.next_arg_index; f.next_arg_index += 1; - return if (inst_cty != try f.object.dg.typeToIndex(inst_ty, .complete)) + return if (inst_cty != try f.typeToIndex(inst_ty, .complete)) .{ .arg_array = i } else .{ .arg = i }; @@ -3281,7 +3259,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", (const char *)"); try f.writeCValue(writer, operand, .Other); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, src_ty); + try f.renderType(writer, src_ty); try writer.writeAll("))"); } else if (ptr_info.host_size != 0) { var host_pl = Type.Payload.Bits{ @@ -3310,11 +3288,11 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, src_ty); + try f.renderType(writer, src_ty); try writer.writeAll(")zig_wrap_"); try f.object.dg.renderTypeForBuiltinFnName(writer, field_ty); try writer.writeAll("(("); - try f.renderTypecast(writer, field_ty); + try f.renderType(writer, field_ty); try writer.writeByte(')'); const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; if (cant_cast) { @@ -3365,7 +3343,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { try f.writeCValue(writer, operand, .FunctionArgument); deref = false; try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, ret_ty); + try f.renderType(writer, ret_ty); try writer.writeAll("));\n"); break :ret_val array_local; } else operand; @@ -3440,7 +3418,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte('('); } else if (dest_c_bits <= 64) { try writer.writeByte('('); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeByte(')'); } @@ -3521,7 +3499,7 @@ fn storeUndefined(f: *Function, lhs_child_ty: Type, dest_ptr: CValue) !CValue { try writer.writeAll("memset("); try f.writeCValue(writer, dest_ptr, .FunctionArgument); try writer.print(", {x}, sizeof(", .{try f.fmtIntLiteral(Type.u8, Value.undef)}); - try f.renderTypecast(writer, lhs_child_ty); + try f.renderType(writer, lhs_child_ty); try writer.writeAll("));\n"); } return CValue.none; @@ -3582,7 +3560,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { if (!is_array) try writer.writeByte('&'); try f.writeCValue(writer, array_src, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, src_ty); + try f.renderType(writer, src_ty); try writer.writeAll("))"); if (src_val == .constant) { try freeLocal(f, inst, array_src.new_local, 0); @@ -3641,13 +3619,13 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue 
{ try writer.writeAll("(0, "); } else { try writer.writeByte('('); - try f.renderTypecast(writer, host_ty); + try f.renderType(writer, host_ty); try writer.writeByte(')'); } if (src_ty.isPtrAtRuntime()) { try writer.writeByte('('); - try f.renderTypecast(writer, Type.usize); + try f.renderType(writer, Type.usize); try writer.writeByte(')'); } try f.writeCValue(writer, src_val, .Other); @@ -3919,7 +3897,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { // results in a NULL pointer, or if LHS is NULL. The operation is only UB // if the result is NULL and then dereferenced. try writer.writeByte('('); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll(")(((uintptr_t)"); try f.writeCValue(writer, lhs, .Other); try writer.writeAll(") "); @@ -3927,7 +3905,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try writer.writeAll(" ("); try f.writeCValue(writer, rhs, .Other); try writer.writeAll("*sizeof("); - try f.renderTypecast(writer, elem_ty); + try f.renderType(writer, elem_ty); try writer.writeAll(")))"); } else try f.writeCValue(writer, lhs, .Initializer); @@ -3992,7 +3970,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(".ptr = ("); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.renderTypecast(writer, inst_ty.slicePtrFieldType(&buf)); + try f.renderType(writer, inst_ty.slicePtrFieldType(&buf)); try writer.writeByte(')'); try f.writeCValue(writer, ptr, .Other); try writer.writeAll("; "); @@ -4032,13 +4010,13 @@ fn airCall( defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { const arg_ty = f.air.typeOf(arg); - const arg_cty = try f.object.dg.typeToIndex(arg_ty, .parameter); - if (f.object.dg.indexToCType(arg_cty).tag() == .void) { + const arg_cty = try f.typeToIndex(arg_ty, .parameter); + if (f.indexToCType(arg_cty).tag() == .void) { resolved_arg.* = .none; continue; } resolved_arg.* = try f.resolveInst(arg); - if (arg_cty != try f.object.dg.typeToIndex(arg_ty, .complete)) { + if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { var lowered_arg_buf: LowerFnRetTyBuffer = undefined; const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target); @@ -4048,7 +4026,7 @@ fn airCall( try writer.writeAll(", "); try f.writeCValue(writer, resolved_arg.*, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, lowered_arg_ty); + try f.renderType(writer, lowered_arg_ty); try writer.writeAll("));\n"); resolved_arg.* = array_local; } @@ -4077,7 +4055,7 @@ fn airCall( .none else if (f.liveness.isUnused(inst)) r: { try writer.writeByte('('); - try f.renderTypecast(writer, Type.void); + try f.renderType(writer, Type.void); try writer.writeByte(')'); break :r .none; } else r: { @@ -4132,7 +4110,7 @@ fn airCall( try writer.writeAll(", "); try f.writeCValueMember(writer, result_local, .{ .identifier = "array" }); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, ret_ty); + try f.renderType(writer, ret_ty); try writer.writeAll("));\n"); try freeLocal(f, inst, result_local.new_local, 0); break :r array_local; @@ -4280,7 +4258,7 @@ fn lowerTry( try writer.writeAll(", "); try f.writeCValueMember(writer, err_union, .{ .identifier = "payload" }); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, payload_ty); + try f.renderType(writer, payload_ty); try writer.writeAll("));\n"); } else { try f.writeCValue(writer, local, .Other); @@ 
-4313,7 +4291,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try f.writeCValue(writer, operand, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, operand_ty); + try f.renderType(writer, operand_ty); try writer.writeAll("))"); } else { try f.writeCValue(writer, result, .Other); @@ -4362,7 +4340,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, dest_ty); + try f.renderType(writer, dest_ty); try writer.writeByte(')'); try f.writeCValue(writer, operand, .Other); try writer.writeAll(";\n"); @@ -4383,7 +4361,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", &"); try f.writeCValue(writer, operand_lval, .Other); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, dest_ty); + try f.renderType(writer, dest_ty); try writer.writeAll("));\n"); // Ensure padding bits have the expected value. @@ -4415,7 +4393,7 @@ fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, Type.usize); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, Type.usize); + try f.renderType(writer, Type.usize); try writer.writeAll(")zig_return_address();\n"); return local; } @@ -4426,7 +4404,7 @@ fn airFrameAddress(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, Type.usize); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, Type.usize); + try f.renderType(writer, Type.usize); try writer.writeAll(")zig_frame_address();\n"); return local; } @@ -4558,11 +4536,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("switch ("); if (condition_ty.zigTypeTag() == .Bool) { try writer.writeByte('('); - try f.renderTypecast(writer, Type.u1); + try f.renderType(writer, Type.u1); try writer.writeByte(')'); } else if (condition_ty.isPtrAtRuntime()) { try writer.writeByte('('); - try f.renderTypecast(writer, Type.usize); + try f.renderType(writer, Type.usize); try writer.writeByte(')'); } try f.writeCValue(writer, condition, .Other); @@ -4591,7 +4569,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("case "); if (condition_ty.isPtrAtRuntime()) { try writer.writeByte('('); - try f.renderTypecast(writer, Type.usize); + try f.renderType(writer, Type.usize); try writer.writeByte(')'); } try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other); @@ -5043,7 +5021,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValueMember(writer, operand, .{ .identifier = "payload" }); if (is_array) { try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("))"); } try writer.writeAll(";\n"); @@ -5127,6 +5105,62 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { } } +fn fieldLocation( + container_ty: Type, + field_ptr_ty: Type, + field_index: u32, + target: std.Target, +) union(enum) { + begin: void, + field: CValue, + byte_offset: u32, + end: void, +} { + return switch (container_ty.zigTypeTag()) { + .Struct => switch (container_ty.containerLayout()) { + .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { + if 
(container_ty.structFieldIsComptime(next_field_index)) continue; + const field_ty = container_ty.structFieldType(next_field_index); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + break .{ .field = if (container_ty.isSimpleTuple()) + .{ .field = next_field_index } + else + .{ .identifier = container_ty.structFieldName(next_field_index) } }; + } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin, + .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0) + .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) } + else + .begin, + }, + .Union => switch (container_ty.containerLayout()) { + .Auto, .Extern => { + const field_ty = container_ty.structFieldType(field_index); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) + return if (container_ty.unionTagTypeSafety() != null and + !container_ty.unionHasAllZeroBitFieldTypes()) + .{ .field = .{ .identifier = "payload" } } + else + .begin; + const field_name = container_ty.unionFields().keys()[field_index]; + return .{ .field = if (container_ty.unionTagTypeSafety()) |_| + .{ .payload_identifier = field_name } + else + .{ .identifier = field_name } }; + }, + .Packed => .begin, + }, + .Pointer => switch (container_ty.ptrSize()) { + .Slice => switch (field_index) { + 0 => .{ .field = .{ .identifier = "ptr" } }, + 1 => .{ .field = .{ .identifier = "len" } }, + else => unreachable, + }, + .One, .Many, .C => unreachable, + }, + else => unreachable, + }; +} + fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5136,10 +5170,10 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { return .none; } - const struct_ptr = try f.resolveInst(extra.struct_operand); + const container_ptr_val = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const struct_ptr_ty = f.air.typeOf(extra.struct_operand); - return structFieldPtr(f, inst, struct_ptr_ty, struct_ptr, extra.field_index); + const container_ptr_ty = f.air.typeOf(extra.struct_operand); + return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, extra.field_index); } fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue { @@ -5150,10 +5184,10 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue return .none; } - const struct_ptr = try f.resolveInst(ty_op.operand); + const container_ptr_val = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const struct_ptr_ty = f.air.typeOf(ty_op.operand); - return structFieldPtr(f, inst, struct_ptr_ty, struct_ptr, index); + const container_ptr_ty = f.air.typeOf(ty_op.operand); + return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, index); } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -5165,130 +5199,115 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { return CValue.none; } - const struct_ptr_ty = f.air.typeOfIndex(inst); + const target = f.object.dg.module.getTarget(); + const container_ptr_ty = f.air.typeOfIndex(inst); + const container_ty = container_ptr_ty.childType(); const field_ptr_ty = f.air.typeOf(extra.field_ptr); const field_ptr_val = try f.resolveInst(extra.field_ptr); try reap(f, inst, &.{extra.field_ptr}); - const target = f.object.dg.module.getTarget(); - const struct_ty = struct_ptr_ty.childType(); + const writer = f.object.writer(); + const local = try 
f.allocLocal(inst, container_ptr_ty); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(" = ("); + try f.renderType(writer, container_ptr_ty); + try writer.writeByte(')'); - if (struct_ty.zigTypeTag() == .Union) { - return f.fail("TODO: CBE: @fieldParentPtr for unions", .{}); - } + switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) { + .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), + .field => |field| { + var u8_ptr_pl = field_ptr_ty.ptrInfo(); + u8_ptr_pl.data.pointee_type = Type.u8; + const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - const field_offset = struct_ty.structFieldOffset(extra.field_index, target); + try writer.writeAll("(("); + try f.renderType(writer, u8_ptr_ty); + try writer.writeByte(')'); + try f.writeCValue(writer, field_ptr_val, .Other); + try writer.writeAll(" - offsetof("); + try f.renderType(writer, container_ty); + try writer.writeAll(", "); + try f.writeCValue(writer, field, .Other); + try writer.writeAll("))"); + }, + .byte_offset => |byte_offset| { + var u8_ptr_pl = field_ptr_ty.ptrInfo(); + u8_ptr_pl.data.pointee_type = Type.u8; + const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var field_offset_pl = Value.Payload.I64{ - .base = .{ .tag = .int_i64 }, - .data = -@intCast(i64, field_offset), - }; - const field_offset_val = Value.initPayload(&field_offset_pl.base); + var byte_offset_pl = Value.Payload.U64{ + .base = .{ .tag = .int_u64 }, + .data = byte_offset, + }; + const byte_offset_val = Value.initPayload(&byte_offset_pl.base); - var u8_ptr_pl = field_ptr_ty.ptrInfo(); - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + try writer.writeAll("(("); + try f.renderType(writer, u8_ptr_ty); + try writer.writeByte(')'); + try f.writeCValue(writer, field_ptr_val, .Other); + try writer.print(" - {})", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)}); + }, + .end => { + try f.writeCValue(writer, field_ptr_val, .Other); + try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + }, + } - const writer = f.object.writer(); - const local = try f.allocLocal(inst, struct_ptr_ty); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderTypecast(writer, struct_ptr_ty); - try writer.writeAll(")&(("); - try f.renderTypecast(writer, u8_ptr_ty); - try writer.writeByte(')'); - try f.writeCValue(writer, field_ptr_val, .Other); - try writer.print(")[{}];\n", .{try f.fmtIntLiteral(Type.isize, field_offset_val)}); + try writer.writeAll(";\n"); return local; } -fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struct_ptr: CValue, index: u32) !CValue { - const writer = f.object.writer(); +fn fieldPtr( + f: *Function, + inst: Air.Inst.Index, + container_ptr_ty: Type, + container_ptr_val: CValue, + field_index: u32, +) !CValue { + const target = f.object.dg.module.getTarget(); + const container_ty = container_ptr_ty.elemType(); const field_ptr_ty = f.air.typeOfIndex(inst); - const field_ptr_info = field_ptr_ty.ptrInfo(); - const struct_ty = struct_ptr_ty.elemType(); - const field_ty = struct_ty.structFieldType(index); // Ensure complete type definition is visible before accessing fields. 
- try f.renderType(std.io.null_writer, struct_ty); + _ = try f.typeToIndex(container_ty, .complete); + const writer = f.object.writer(); const local = try f.allocLocal(inst, field_ptr_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, field_ptr_ty); + try f.renderType(writer, field_ptr_ty); try writer.writeByte(')'); - const extra_name: CValue = switch (struct_ty.tag()) { - .union_tagged, .union_safety_tagged => .{ .identifier = "payload" }, - else => .none, - }; - - const field_loc: union(enum) { - begin: void, - field: CValue, - end: void, - } = switch (struct_ty.tag()) { - .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => for (index..struct_ty.structFieldCount()) |field_i| { - if (!struct_ty.structFieldIsComptime(field_i) and - struct_ty.structFieldType(field_i).hasRuntimeBitsIgnoreComptime()) - break .{ .field = if (struct_ty.isSimpleTuple()) - .{ .field = field_i } - else - .{ .identifier = struct_ty.structFieldName(field_i) } }; - } else .end, - .Packed => if (field_ptr_info.data.host_size == 0) { - const target = f.object.dg.module.getTarget(); - - const byte_offset = struct_ty.packedStructFieldByteOffset(index, target); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); - - var u8_ptr_pl = field_ptr_info; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) { + .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), + .field => |field| { + try writer.writeByte('&'); + try f.writeCValueDerefMember(writer, container_ptr_val, field); + }, + .byte_offset => |byte_offset| { + var u8_ptr_pl = field_ptr_ty.ptrInfo(); + u8_ptr_pl.data.pointee_type = Type.u8; + const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - if (!std.mem.isAligned(byte_offset, field_ptr_ty.ptrAlignment(target))) { - return f.fail("TODO: CBE: unaligned packed struct field pointer", .{}); - } + var byte_offset_pl = Value.Payload.U64{ + .base = .{ .tag = .int_u64 }, + .data = byte_offset, + }; + const byte_offset_val = Value.initPayload(&byte_offset_pl.base); - try writer.writeAll("&(("); - try f.renderTypecast(writer, u8_ptr_ty); - try writer.writeByte(')'); - try f.writeCValue(writer, struct_ptr, .Other); - try writer.print(")[{}];\n", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)}); - return local; - } else .begin, + try writer.writeAll("(("); + try f.renderType(writer, u8_ptr_ty); + try writer.writeByte(')'); + try f.writeCValue(writer, container_ptr_val, .Other); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)}); }, - .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { - try f.writeCValue(writer, struct_ptr, .Other); - try writer.writeAll(";\n"); - return local; - } else if (field_ty.hasRuntimeBitsIgnoreComptime()) .{ .field = .{ - .identifier = struct_ty.unionFields().keys()[index], - } } else .end, - else => unreachable, - }; + .end => { + try f.writeCValue(writer, container_ptr_val, .Other); + try writer.print(" + {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + }, + } - if (struct_ty.hasRuntimeBitsIgnoreComptime()) { - try writer.writeByte('&'); - switch (field_loc) { - .begin, .end => { - try writer.writeByte('('); - try f.writeCValue(writer, struct_ptr, .Other); - try 
writer.print(")[{}]", .{@boolToInt(field_loc == .end)}); - }, - .field => |field| if (extra_name != .none) { - try f.writeCValueDerefMember(writer, struct_ptr, extra_name); - try writer.writeByte('.'); - try f.writeCValue(writer, field, .Other); - } else try f.writeCValueDerefMember(writer, struct_ptr, field), - } - } else try f.writeCValue(writer, struct_ptr, .Other); try writer.writeAll(";\n"); return local; } @@ -5315,7 +5334,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); // Ensure complete type definition is visible before accessing fields. - try f.renderType(std.io.null_writer, struct_ty); + _ = try f.typeToIndex(struct_ty, .complete); const extra_name: CValue = switch (struct_ty.tag()) { .union_tagged, .union_safety_tagged => .{ .identifier = "payload" }, @@ -5362,7 +5381,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = zig_wrap_"); try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); try writer.writeAll("(("); - try f.renderTypecast(writer, field_int_ty); + try f.renderType(writer, field_int_ty); try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { @@ -5389,7 +5408,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("));\n"); try freeLocal(f, inst, temp_local.new_local, 0); return local; @@ -5411,7 +5430,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", &"); try f.writeCValue(writer, operand_lval, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("));\n"); if (struct_byval == .constant) { @@ -5442,7 +5461,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { } else try f.writeCValueMember(writer, struct_byval, field_name); if (is_array) { try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll("))"); } try writer.writeAll(";\n"); @@ -5510,7 +5529,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(w, local, .Other); try w.writeAll(" = ("); - try f.renderTypecast(w, inst_ty); + try f.renderType(w, inst_ty); try w.writeByte(')'); try f.writeCValue(w, operand, .Initializer); try w.writeAll(";\n"); @@ -5571,7 +5590,7 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try f.writeCValue(writer, payload, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, payload_ty); + try f.renderType(writer, payload_ty); try writer.writeAll("));\n"); } return local; @@ -5691,7 +5710,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try f.writeCValue(writer, payload, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, payload_ty); + try f.renderType(writer, payload_ty); try writer.writeAll("));\n"); } return local; @@ -5837,7 +5856,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); + 
try f.renderType(writer, inst_ty); try writer.writeByte(')'); try f.writeCValue(writer, operand, .Other); try writer.writeAll(";\n"); @@ -5959,7 +5978,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.writeAll(";\n"); try writer.writeAll("if ("); try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderTypecast(writer, ptr_ty.childType()); + try f.renderType(writer, ptr_ty.childType()); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -5988,7 +6007,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.writeAll(";\n"); try f.writeCValue(writer, local, .Other); try writer.print(".is_null = zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderTypecast(writer, ptr_ty.childType()); + try f.renderType(writer, ptr_ty.childType()); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -6031,12 +6050,12 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { switch (extra.op()) { else => { try writer.writeAll("zig_atomic("); - try f.renderTypecast(writer, ptr_ty.elemType()); + try f.renderType(writer, ptr_ty.elemType()); try writer.writeByte(')'); }, .Nand, .Min, .Max => { // These are missing from stdatomic.h, so no atomic types for now. - try f.renderTypecast(writer, ptr_ty.elemType()); + try f.renderType(writer, ptr_ty.elemType()); }, } if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); @@ -6073,7 +6092,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = zig_atomic_load((zig_atomic("); - try f.renderTypecast(writer, ptr_ty.elemType()); + try f.renderType(writer, ptr_ty.elemType()); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -6096,7 +6115,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa const writer = f.object.writer(); try writer.writeAll("zig_atomic_store((zig_atomic("); - try f.renderTypecast(writer, ptr_ty.elemType()); + try f.renderType(writer, ptr_ty.elemType()); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -6138,7 +6157,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" += "); try f.object.dg.renderValue(writer, Type.usize, Value.one, .Other); try writer.writeAll(") (("); - try f.renderTypecast(writer, u8_ptr_ty); + try f.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); try f.writeCValue(writer, dest_ptr, .FunctionArgument); try writer.writeAll(")["); @@ -6514,7 +6533,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .Auto, .Extern => { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll(")"); try writer.writeByte('{'); var empty = true; @@ -6557,7 +6576,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try f.writeCValue(writer, resolved_element, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderTypecast(writer, element_ty); + try f.renderType(writer, element_ty); try writer.writeAll("));\n"); } }, @@ -6602,11 +6621,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderIntCast(writer, 
inst_ty, element, field_ty, .FunctionArgument); } else { try writer.writeByte('('); - try f.renderTypecast(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeByte(')'); if (field_ty.isPtrAtRuntime()) { try writer.writeByte('('); - try f.renderTypecast(writer, switch (int_info.signedness) { + try f.renderType(writer, switch (int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, }); diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index b9e171db57..6bbd6ad7ef 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -48,7 +48,6 @@ fn testParentFieldPtrFirst(a: *const bool) !void { test "@fieldParentPtr untagged union" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testFieldParentPtrUnion(&bar.c); @@ -75,7 +74,6 @@ fn testFieldParentPtrUnion(c: *const i32) !void { test "@fieldParentPtr tagged union" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testFieldParentPtrTaggedUnion(&bar_tagged.c); @@ -102,7 +100,6 @@ fn testFieldParentPtrTaggedUnion(c: *const i32) !void { test "@fieldParentPtr extern union" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testFieldParentPtrExternUnion(&bar_extern.c); diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index e1237a578b..85214bd7d8 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -603,7 +603,6 @@ test "packed struct initialized in bitcast" { test "pointer to container level packed struct field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; -- cgit v1.2.3 From 597e8011f7b2ea76755165fa5a09b2f725180268 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 22 Feb 2023 23:32:54 -0500 Subject: CType: fix lowering of generic function pointer --- src/codegen/c/type.zig | 29 +++++++++++++++++------------ test/behavior/pointers.zig | 1 - 2 files changed, 17 insertions(+), 13 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index bf148a8b87..04c0ea3003 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1296,19 +1296,21 @@ pub const CType = extern union { .Fn => { const info = ty.fnInfo(); - if (lookup.isMutable()) { - const param_kind: Kind = switch (kind) { - .forward, .forward_parameter => .forward_parameter, - .complete, .parameter, .global => .parameter, - .payload => unreachable, - }; - _ = try lookup.typeToIndex(info.return_type, param_kind); - for (info.param_types) 
|param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - _ = try lookup.typeToIndex(param_type, param_kind); + if (!info.is_generic) { + if (lookup.isMutable()) { + const param_kind: Kind = switch (kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, + }; + _ = try lookup.typeToIndex(info.return_type, param_kind); + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + _ = try lookup.typeToIndex(param_type, param_kind); + } } - } - self.init(if (info.is_var_args) .varargs_function else .function); + self.init(if (info.is_var_args) .varargs_function else .function); + } else self.init(.void); }, } } @@ -1619,6 +1621,7 @@ pub const CType = extern union { .varargs_function, => { const info = ty.fnInfo(); + assert(!info.is_generic); const param_kind: Kind = switch (kind) { .forward, .forward_parameter => .forward_parameter, .complete, .parameter, .global => .parameter, @@ -1764,6 +1767,7 @@ pub const CType = extern union { if (ty.zigTypeTag() != .Fn) return false; const info = ty.fnInfo(); + assert(!info.is_generic); const data = cty.cast(Payload.Function).?.data; const param_kind: Kind = switch (self.kind) { .forward, .forward_parameter => .forward_parameter, @@ -1878,6 +1882,7 @@ pub const CType = extern union { .varargs_function, => { const info = ty.fnInfo(); + assert(!info.is_generic); const param_kind: Kind = switch (self.kind) { .forward, .forward_parameter => .forward_parameter, .complete, .parameter, .global => .parameter, diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 2d55292916..ec4ff332cf 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -507,7 +507,6 @@ test "ptrCast comptime known slice to C pointer" { } test "ptrToInt on a generic function" { - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 57f6adf85da58c0c91a036deaa614c30df010b78 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 00:28:49 -0500 Subject: CBE: implement c varargs Removed some backend test skip checks for things disabled in std. 
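The new airCVaStart, airCVaArg, airCVaEnd, and airCVaCopy handlers lower Zig's C-varargs builtins onto the host's stdarg.h macros, which is why zig.h gains the include below. A minimal sketch of the Zig side being lowered, matching the shape of the add helper exercised in the updated var_args.zig:

    fn add(count: c_int, ...) callconv(.C) c_int {
        var ap = @cVaStart();
        defer @cVaEnd(&ap);

        var sum: c_int = 0;
        var i: usize = 0;
        while (i < count) : (i += 1) {
            sum += @cVaArg(&ap, c_int);
        }
        return sum;
    }

Note that C's va_start needs the last named parameter, so airCVaStart rejects variadic functions with no runtime arguments; this is also why the zero-parameter S.simple calls are guarded off for stage2_c in the test changes below.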
--- lib/zig.h | 1 + src/codegen/c.zig | 93 ++++++++++++++++++++++++++++++++++++++++++++-- test/behavior/var_args.zig | 15 ++++---- 3 files changed, 97 insertions(+), 12 deletions(-) (limited to 'src/codegen') diff --git a/lib/zig.h b/lib/zig.h index d336ecb2e2..67f64635c5 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -5,6 +5,7 @@ #endif #include #include +#include #include #include diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b5269acc5c..390e8f8f4e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -211,6 +211,15 @@ const reserved_idents = std.ComptimeStringMap(void, .{ .{ "volatile", {} }, .{ "while ", {} }, + // stdarg.h + .{ "va_start", {} }, + .{ "va_arg", {} }, + .{ "va_end", {} }, + .{ "va_copy", {} }, + + // stddef.h + .{ "offsetof", {} }, + // windows.h .{ "max", {} }, .{ "min", {} }, @@ -2952,10 +2961,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}), .vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}), - .c_va_arg => return f.fail("TODO implement c_va_arg", .{}), - .c_va_copy => return f.fail("TODO implement c_va_copy", .{}), - .c_va_end => return f.fail("TODO implement c_va_end", .{}), - .c_va_start => return f.fail("TODO implement c_va_start", .{}), + .c_va_start => try airCVaStart(f, inst), + .c_va_arg => try airCVaArg(f, inst), + .c_va_end => try airCVaEnd(f, inst), + .c_va_copy => try airCVaCopy(f, inst), // zig fmt: on }; if (result_value == .new_local) { @@ -6862,6 +6871,82 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { return local; } +fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return .none; + + const inst_ty = f.air.typeOfIndex(inst); + const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete); + + const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len; + if (param_len == 0) + return f.fail("CBE: C requires at least one runtime argument for varargs functions", .{}); + + const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("va_start(*(va_list *)&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", "); + try f.writeCValue(writer, .{ .arg = param_len - 1 }, .FunctionArgument); + try writer.writeAll(");\n"); + return local; +} + +fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue { + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + if (f.liveness.isUnused(inst)) { + try reap(f, inst, &.{ty_op.operand}); + return .none; + } + + const inst_ty = f.air.typeOfIndex(inst); + const va_list = try f.resolveInst(ty_op.operand); + try reap(f, inst, &.{ty_op.operand}); + + const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(" = va_arg(*(va_list *)"); + try f.writeCValue(writer, va_list, .Other); + try writer.writeAll(", "); + try f.renderType(writer, f.air.getRefType(ty_op.ty)); + try writer.writeAll(");\n"); + return local; +} + +fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue { + const un_op = f.air.instructions.items(.data)[inst].un_op; + + const va_list = try f.resolveInst(un_op); + try reap(f, inst, &.{un_op}); + + const writer = f.object.writer(); + try writer.writeAll("va_end(*(va_list *)"); + try f.writeCValue(writer, va_list, .Other); + try writer.writeAll(");\n"); + return .none; +} + +fn airCVaCopy(f: *Function, 
inst: Air.Inst.Index) !CValue { + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + if (f.liveness.isUnused(inst)) { + try reap(f, inst, &.{ty_op.operand}); + return .none; + } + + const inst_ty = f.air.typeOfIndex(inst); + const va_list = try f.resolveInst(ty_op.operand); + try reap(f, inst, &.{ty_op.operand}); + + const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("va_copy(*(va_list *)&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", *(va_list *)"); + try f.writeCValue(writer, va_list, .Other); + try writer.writeAll(");\n"); + return local; +} + fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 { return switch (order) { // Note: unordered is actually even less atomic than relaxed diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig index 97f90b559d..6431ca9470 100644 --- a/test/behavior/var_args.zig +++ b/test/behavior/var_args.zig @@ -96,10 +96,9 @@ fn doNothingWithFirstArg(args: anytype) void { test "simple variadic function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) { + if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) { // https://github.com/ziglang/zig/issues/14096 return error.SkipZigTest; } @@ -124,8 +123,10 @@ test "simple variadic function" { } }; - try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0))); - try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024))); + if (builtin.zig_backend != .stage2_c) { // C doesn't support varargs without a preceding runtime arg. 
+ try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0))); + try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024))); + } try std.testing.expectEqual(@as(c_int, 0), S.add(0)); try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1))); try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2))); @@ -134,10 +135,9 @@ test "simple variadic function" { test "variadic functions" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) { + if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) { // https://github.com/ziglang/zig/issues/14096 return error.SkipZigTest; } @@ -178,10 +178,9 @@ test "variadic functions" { test "copy VaList" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) { + if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) { // https://github.com/ziglang/zig/issues/14096 return error.SkipZigTest; } -- cgit v1.2.3 From a0d7fd162b7568c0291ebcfc561a2852c7077e15 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 05:16:23 -0500 Subject: CBE: support call attributes * Support always_tail and never_tail/never_inline with a comptime callee using clang * Support never_inline using gcc * Support never_inline using msvc Unfortunately, can't enable behavior tests because of the conditional support. 
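The call modifiers are lowered onto the zig_never_inline, zig_never_tail, and zig_always_tail macros added to zig.h below, so availability depends on the host C compiler (clang covers all three, while gcc and msvc only get never_inline), which is why the behavior tests stay disabled. A minimal sketch of the Zig calls being lowered; foo and bar are placeholders, not code from this patch:

    fn foo(x: u32) u32 {
        return x +% 1;
    }

    fn bar(x: u32) u32 {
        // becomes a zig_never_inline-annotated call in the emitted C
        const a = @call(.never_inline, foo, .{x});
        // needs clang's musttail; other hosts hit zig_always_tail_unavailable
        return @call(.always_tail, foo, .{a});
    }

The *_unavailable fallbacks expand to identifiers that are never declared, so a build that requires a modifier the host toolchain lacks fails at C compile time rather than silently dropping the attribute.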
--- lib/zig.h | 26 +++ src/codegen/c.zig | 496 +++++++++++++++++++++++++++++------------------------- src/link/C.zig | 35 ++-- src/target.zig | 1 + 4 files changed, 304 insertions(+), 254 deletions(-) (limited to 'src/codegen') diff --git a/lib/zig.h b/lib/zig.h index 67f64635c5..fb0573a049 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -78,6 +78,32 @@ typedef char bool; #define zig_cold #endif +#if zig_has_attribute(flatten) +#define zig_maybe_flatten __attribute__((flatten)) +#else +#define zig_maybe_flatten +#endif + +#if zig_has_attribute(noinline) +#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten +#elif defined(_MSC_VER) +#define zig_never_inline __declspec(noinline) zig_maybe_flatten +#else +#define zig_never_inline zig_never_inline_unavailable +#endif + +#if zig_has_attribute(not_tail_called) +#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline +#else +#define zig_never_tail zig_never_tail_unavailable +#endif + +#if zig_has_attribute(always_inline) +#define zig_always_tail __attribute__((musttail)) +#else +#define zig_always_tail zig_always_tail_unavailable +#endif + #if __STDC_VERSION__ >= 199901L #define zig_restrict restrict #elif defined(__GNUC__) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 390e8f8f4e..4d42758773 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -23,7 +23,6 @@ const libcFloatSuffix = target_util.libcFloatSuffix; const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; -const Mutability = enum { @"const", mut }; const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -55,6 +54,8 @@ pub const CValue = union(enum) { /// Render these bytes literally. /// TODO make this a [*:0]const u8 to save memory bytes: []const u8, + /// A deferred call_always_tail + call_always_tail: void, }; const BlockData = struct { @@ -62,21 +63,22 @@ const BlockData = struct { result: CValue, }; -const TypedefKind = enum { - Forward, - Complete, -}; - pub const CValueMap = std.AutoHashMap(Air.Inst.Ref, CValue); pub const LazyFnKey = union(enum) { tag_name: Decl.Index, + never_tail: Decl.Index, + never_inline: Decl.Index, }; pub const LazyFnValue = struct { fn_name: []const u8, - data: union { + data: Data, + + pub const Data = union { tag_name: Type, - }, + never_tail: void, + never_inline: void, + }; }; pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue); @@ -314,7 +316,7 @@ pub const Function = struct { const gpa = f.object.dg.gpa; try f.allocs.put(gpa, decl_c_value.new_local, true); try writer.writeAll("static "); - try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .@"const", alignment, .Complete); + try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, Const, alignment, .complete); try writer.writeAll(" = "); try f.object.dg.renderValue(writer, ty, val, .StaticInitializer); try writer.writeAll(";\n "); @@ -348,15 +350,13 @@ pub const Function = struct { } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { - const result = try f.allocAlignedLocal(ty, .mut, 0); + const result = try f.allocAlignedLocal(ty, .{}, 0); log.debug("%{d}: allocating t{d}", .{ inst, result.new_local }); return result; } /// Only allocates the local; does not print anything. 
- fn allocAlignedLocal(f: *Function, ty: Type, mutability: Mutability, alignment: u32) !CValue { - _ = mutability; - + fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue { if (f.getFreeLocals().getPtrContext(ty, f.tyHashCtx())) |locals_list| { for (locals_list.items, 0..) |local_index, i| { const local = &f.locals.items[local_index]; @@ -451,11 +451,9 @@ pub const Function = struct { return f.object.dg.fmtIntLiteral(ty, val); } - fn getTagNameFn(f: *Function, enum_ty: Type) ![]const u8 { + fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 { const gpa = f.object.dg.gpa; - const owner_decl = enum_ty.getOwnerDecl(); - - const gop = try f.lazy_fns.getOrPut(gpa, .{ .tag_name = owner_decl }); + const gop = try f.lazy_fns.getOrPut(gpa, key); if (!gop.found_existing) { errdefer _ = f.lazy_fns.pop(); @@ -464,11 +462,21 @@ pub const Function = struct { const arena = promoted.arena.allocator(); gop.value_ptr.* = .{ - .fn_name = try std.fmt.allocPrint(arena, "zig_tagName_{}__{d}", .{ - fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), - @enumToInt(owner_decl), - }), - .data = .{ .tag_name = try enum_ty.copy(arena) }, + .fn_name = switch (key) { + .tag_name, + .never_tail, + .never_inline, + => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{ + @tagName(key), + fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), + @enumToInt(owner_decl), + }), + }, + .data = switch (key) { + .tag_name => .{ .tag_name = try data.tag_name.copy(arena) }, + .never_tail => .{ .never_tail = data.never_tail }, + .never_inline => .{ .never_inline = data.never_inline }, + }, }; } return gop.value_ptr.fn_name; @@ -1457,24 +1465,31 @@ pub const DeclGen = struct { } } - fn renderFunctionSignature(dg: *DeclGen, w: anytype, kind: TypedefKind, export_index: u32) !void { + fn renderFunctionSignature( + dg: *DeclGen, + w: anytype, + fn_decl_index: Decl.Index, + kind: CType.Kind, + name: union(enum) { + export_index: u32, + string: []const u8, + }, + ) !void { const store = &dg.ctypes.set; const module = dg.module; - const fn_ty = dg.decl.?.ty; - const fn_cty_idx = try dg.typeToIndex(fn_ty, switch (kind) { - .Forward => .forward, - .Complete => .complete, - }); + const fn_decl = module.declPtr(fn_decl_index); + const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); - const fn_info = fn_ty.fnInfo(); + const fn_info = fn_decl.ty.fnInfo(); if (fn_info.cc == .Naked) { switch (kind) { - .Forward => try w.writeAll("zig_naked_decl "), - .Complete => try w.writeAll("zig_naked "), + .forward => try w.writeAll("zig_naked_decl "), + .complete => try w.writeAll("zig_naked "), + else => unreachable, } } - if (dg.decl.?.val.castTag(.function)) |func_payload| + if (fn_decl.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); @@ -1485,7 +1500,7 @@ pub const DeclGen = struct { w, fn_cty_idx, .suffix, - CQualifiers.init(.{}), + .{}, ); try w.print("{}", .{trailing}); @@ -1493,16 +1508,37 @@ pub const DeclGen = struct { try w.print("zig_callconv({s}) ", .{call_conv}); } - if (fn_info.alignment > 0 and kind == .Complete) { - try w.print(" zig_align_fn({})", .{fn_info.alignment}); + switch (kind) { + .forward => {}, + .complete => if (fn_info.alignment > 0) + try w.print(" zig_align_fn({})", .{fn_info.alignment}), + else => unreachable, } - try dg.renderDeclName(w, dg.decl_index.unwrap().?, export_index); + switch (name) { + 
.export_index => |export_index| try dg.renderDeclName(w, fn_decl_index, export_index), + .string => |string| try w.writeAll(string), + } - try renderTypeSuffix(dg.decl_index, store.*, module, w, fn_cty_idx, .suffix); + try renderTypeSuffix( + dg.decl_index, + store.*, + module, + w, + fn_cty_idx, + .suffix, + CQualifiers.init(.{ .@"const" = switch (kind) { + .forward => false, + .complete => true, + else => unreachable, + } }), + ); - if (fn_info.alignment > 0 and kind == .Forward) { - try w.print(" zig_align_fn({})", .{fn_info.alignment}); + switch (kind) { + .forward => if (fn_info.alignment > 0) + try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .complete => {}, + else => unreachable, } } @@ -1533,16 +1569,8 @@ pub const DeclGen = struct { const store = &dg.ctypes.set; const module = dg.module; const idx = try dg.typeToIndex(t, .complete); - _ = try renderTypePrefix( - dg.decl_index, - store.*, - module, - w, - idx, - .suffix, - CQualifiers.init(.{}), - ); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix); + _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1655,9 +1683,9 @@ pub const DeclGen = struct { w: anytype, ty: Type, name: CValue, - mutability: Mutability, + qualifiers: CQualifiers, alignment: u32, - _: TypedefKind, + kind: CType.Kind, ) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; const module = dg.module; @@ -1668,71 +1696,12 @@ pub const DeclGen = struct { .gt => try w.print("zig_align({}) ", .{alignment}), }; - const idx = try dg.typeToIndex(ty, .complete); - const trailing = try renderTypePrefix( - dg.decl_index, - store.*, - module, - w, - idx, - .suffix, - CQualifiers.init(.{ .@"const" = mutability == .@"const" }), - ); + const idx = try dg.typeToIndex(ty, kind); + const trailing = + try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, qualifiers); try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix); - } - - fn renderTagNameFn(dg: *DeclGen, w: anytype, fn_name: []const u8, enum_ty: Type) !void { - const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - - try w.writeAll("static "); - try dg.renderType(w, name_slice_ty); - try w.writeByte(' '); - try w.writeAll(fn_name); - try w.writeByte('('); - try dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, .@"const", 0, .Complete); - try w.writeAll(") {\n switch (tag) {\n"); - for (enum_ty.enumFields().keys(), 0..) |name, index| { - const name_z = try dg.gpa.dupeZ(u8, name); - defer dg.gpa.free(name_z); - const name_bytes = name_z[0 .. 
name_z.len + 1]; - - var tag_pl: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); - - var int_pl: Value.Payload.U64 = undefined; - const int_val = tag_val.enumToInt(enum_ty, &int_pl); - - var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; - const name_ty = Type.initPayload(&name_ty_pl.base); - - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name_bytes }; - const name_val = Value.initPayload(&name_pl.base); - - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); - - try w.print(" case {}: {{\n static ", .{try dg.fmtIntLiteral(enum_ty, int_val)}); - try dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, .@"const", 0, .Complete); - try w.writeAll(" = "); - try dg.renderValue(w, name_ty, name_val, .Initializer); - try w.writeAll(";\n return ("); - try dg.renderType(w, name_slice_ty); - try w.print("){{{}, {}}};\n", .{ - fmtIdent("name"), try dg.fmtIntLiteral(Type.usize, len_val), - }); - - try w.writeAll(" }\n"); - } - try w.writeAll(" }\n while ("); - try dg.renderValue(w, Type.bool, Value.true, .Other); - try w.writeAll(") "); - _ = try airBreakpoint(w); - try w.writeAll("}\n"); + try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { @@ -1771,6 +1740,7 @@ pub const DeclGen = struct { fmtIdent(ident), }), .bytes => |bytes| return w.writeAll(bytes), + .call_always_tail => return dg.fail("CBE: the result of @call(.always_tail, ...) must be returned directly", .{}), } } @@ -1804,6 +1774,7 @@ pub const DeclGen = struct { try w.writeAll(bytes); return w.writeByte(')'); }, + .call_always_tail => return dg.writeCValue(w, c_value), } } @@ -1816,7 +1787,16 @@ pub const DeclGen = struct { fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .none, .constant, .field, .undef => unreachable, - .new_local, .local, .arg, .arg_array, .decl, .identifier, .payload_identifier, .bytes => { + .new_local, + .local, + .arg, + .arg_array, + .decl, + .identifier, + .payload_identifier, + .bytes, + .call_always_tail, + => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, @@ -1945,7 +1925,8 @@ pub const DeclGen = struct { const CTypeFix = enum { prefix, suffix }; const CQualifiers = std.enums.EnumSet(enum { @"const", @"volatile", restrict }); -const CTypeRenderTrailing = enum { +const Const = CQualifiers.init(.{ .@"const" = true }); +const RenderCTypeTrailing = enum { no_space, maybe_space, @@ -2004,8 +1985,8 @@ fn renderTypePrefix( idx: CType.Index, parent_fix: CTypeFix, qualifiers: CQualifiers, -) @TypeOf(w).Error!CTypeRenderTrailing { - var trailing = CTypeRenderTrailing.maybe_space; +) @TypeOf(w).Error!RenderCTypeTrailing { + var trailing = RenderCTypeTrailing.maybe_space; const cty = store.indexToCType(idx); switch (cty.tag()) { @@ -2147,7 +2128,7 @@ fn renderTypePrefix( w, cty.cast(CType.Payload.Function).?.data.return_type, .suffix, - CQualifiers.init(.{}), + .{}, ); switch (parent_fix) { .prefix => { @@ -2174,6 +2155,7 @@ fn renderTypeSuffix( w: anytype, idx: CType.Index, parent_fix: CTypeFix, + qualifiers: CQualifiers, ) @TypeOf(w).Error!void { const cty = store.indexToCType(idx); switch (cty.tag()) { @@ -2220,7 +2202,15 @@ fn renderTypeSuffix( .pointer_const, .pointer_volatile, 
.pointer_const_volatile, - => try renderTypeSuffix(decl, store, mod, w, cty.cast(CType.Payload.Child).?.data, .prefix), + => try renderTypeSuffix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Child).?.data, + .prefix, + .{}, + ), .array, .vector, @@ -2238,6 +2228,7 @@ fn renderTypeSuffix( w, cty.cast(CType.Payload.Sequence).?.data.elem_type, .suffix, + .{}, ); }, @@ -2272,17 +2263,10 @@ fn renderTypeSuffix( for (data.param_types, 0..) |param_type, param_i| { if (need_comma) try w.writeAll(", "); need_comma = true; - const trailing = try renderTypePrefix( - decl, - store, - mod, - w, - param_type, - .suffix, - CQualifiers.init(.{ .@"const" = true }), - ); - try w.print("{}a{d}", .{ trailing, param_i }); - try renderTypeSuffix(decl, store, mod, w, param_type, .suffix); + const trailing = + try renderTypePrefix(decl, store, mod, w, param_type, .suffix, qualifiers); + if (qualifiers.contains(.@"const")) try w.print("{}a{d}", .{ trailing, param_i }); + try renderTypeSuffix(decl, store, mod, w, param_type, .suffix, .{}); } switch (tag) { .function => {}, @@ -2296,7 +2280,7 @@ fn renderTypeSuffix( if (!need_comma) try w.writeAll("void"); try w.writeByte(')'); - try renderTypeSuffix(decl, store, mod, w, data.return_type, .suffix); + try renderTypeSuffix(decl, store, mod, w, data.return_type, .suffix, .{}); }, } } @@ -2316,17 +2300,9 @@ fn renderAggregateFields( .eq => {}, .gt => try writer.print("zig_align({}) ", .{field.alignas.getAlign()}), } - const trailing = try renderTypePrefix( - .none, - store, - mod, - writer, - field.type, - .suffix, - CQualifiers.init(.{}), - ); + const trailing = try renderTypePrefix(.none, store, mod, writer, field.type, .suffix, .{}); try writer.print("{}{ }", .{ trailing, fmtIdent(mem.span(field.name)) }); - try renderTypeSuffix(.none, store, mod, writer, field.type, .suffix); + try renderTypeSuffix(.none, store, mod, writer, field.type, .suffix, .{}); try writer.writeAll(";\n"); } try writer.writeByteNTimes(' ', indent); @@ -2347,25 +2323,9 @@ pub fn genTypeDecl( switch (global_cty.tag()) { .fwd_anon_struct => if (decl != .none) { try writer.writeAll("typedef "); - _ = try renderTypePrefix( - .none, - global_store, - mod, - writer, - global_idx, - .suffix, - CQualifiers.init(.{}), - ); + _ = try renderTypePrefix(.none, global_store, mod, writer, global_idx, .suffix, .{}); try writer.writeByte(' '); - _ = try renderTypePrefix( - decl, - decl_store, - mod, - writer, - decl_idx, - .suffix, - CQualifiers.init(.{}), - ); + _ = try renderTypePrefix(decl, decl_store, mod, writer, decl_idx, .suffix, .{}); try writer.writeAll(";\n"); }, @@ -2383,15 +2343,7 @@ pub fn genTypeDecl( .fwd_union, => { const owner_decl = global_cty.cast(CType.Payload.FwdDecl).?.data; - _ = try renderTypePrefix( - .none, - global_store, - mod, - writer, - global_idx, - .suffix, - CQualifiers.init(.{}), - ); + _ = try renderTypePrefix(.none, global_store, mod, writer, global_idx, .suffix, .{}); try writer.writeAll("; // "); try mod.declPtr(owner_decl).renderFullyQualifiedName(mod, writer); try writer.writeByte('\n'); @@ -2467,7 +2419,7 @@ pub fn genErrDecls(o: *Object) !void { const name_val = Value.initPayload(&name_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .@"const", 0, .Complete); + try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete); try writer.writeAll(" = "); try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer); try writer.writeAll(";\n"); @@ -2480,7 
+2432,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = Type.initPayload(&name_array_ty_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .@"const", 0, .Complete); + try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, Const, 0, .complete); try writer.writeAll(" = {"); for (o.dg.module.error_name_list.items, 0..) |name, value| { if (value != 0) try writer.writeByte(','); @@ -2503,7 +2455,7 @@ fn genExports(o: *Object) !void { if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { for (exports.items[1..], 1..) |@"export", i| { try fwd_decl_writer.writeAll("zig_export("); - try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, i)); + try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ fmtStringLiteral(exports.items[0].options.name), fmtStringLiteral(@"export".options.name), @@ -2513,13 +2465,85 @@ fn genExports(o: *Object) !void { } pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { - const writer = o.writer(); - switch (lazy_fn.key_ptr.*) { - .tag_name => _ = try o.dg.renderTagNameFn( - writer, - lazy_fn.value_ptr.fn_name, - lazy_fn.value_ptr.data.tag_name, - ), + const w = o.writer(); + const key = lazy_fn.key_ptr.*; + const val = lazy_fn.value_ptr; + const fn_name = val.fn_name; + switch (key) { + .tag_name => { + const enum_ty = val.data.tag_name; + + const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + + try w.writeAll("static "); + try o.dg.renderType(w, name_slice_ty); + try w.writeByte(' '); + try w.writeAll(fn_name); + try w.writeByte('('); + try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); + try w.writeAll(") {\n switch (tag) {\n"); + for (enum_ty.enumFields().keys(), 0..) |name, index| { + const name_z = try o.dg.gpa.dupeZ(u8, name); + defer o.dg.gpa.free(name_z); + const name_bytes = name_z[0 .. 
name_z.len + 1]; + + var tag_pl: Value.Payload.U32 = .{ + .base = .{ .tag = .enum_field_index }, + .data = @intCast(u32, index), + }; + const tag_val = Value.initPayload(&tag_pl.base); + + var int_pl: Value.Payload.U64 = undefined; + const int_val = tag_val.enumToInt(enum_ty, &int_pl); + + var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; + const name_ty = Type.initPayload(&name_ty_pl.base); + + var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name_bytes }; + const name_val = Value.initPayload(&name_pl.base); + + var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; + const len_val = Value.initPayload(&len_pl.base); + + try w.print(" case {}: {{\n static ", .{try o.dg.fmtIntLiteral(enum_ty, int_val)}); + try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete); + try w.writeAll(" = "); + try o.dg.renderValue(w, name_ty, name_val, .Initializer); + try w.writeAll(";\n return ("); + try o.dg.renderType(w, name_slice_ty); + try w.print("){{{}, {}}};\n", .{ + fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val), + }); + + try w.writeAll(" }\n"); + } + try w.writeAll(" }\n while ("); + try o.dg.renderValue(w, Type.bool, Value.true, .Other); + try w.writeAll(") "); + _ = try airBreakpoint(w); + try w.writeAll("}\n"); + }, + .never_tail, .never_inline => |fn_decl_index| { + const fn_decl = o.dg.module.declPtr(fn_decl_index); + const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete); + const fn_info = fn_cty.cast(CType.Payload.Function).?.data; + + const fwd_decl_writer = o.dg.fwd_decl.writer(); + try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)}); + try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{ .string = fn_name }); + try fwd_decl_writer.writeAll(";\n"); + + try w.print("static zig_{s} ", .{@tagName(key)}); + try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{ .string = fn_name }); + try w.writeAll(" {\n return "); + try o.dg.renderDeclName(w, fn_decl_index, 0); + try w.writeByte('('); + for (0..fn_info.param_types.len) |arg| { + if (arg > 0) try w.writeAll(", "); + try o.dg.writeCValue(w, .{ .arg = arg }); + } + try w.writeAll(");\n}\n"); + }, } } @@ -2529,6 +2553,7 @@ pub fn genFunc(f: *Function) !void { const o = &f.object; const gpa = o.dg.gpa; + const decl_index = o.dg.decl_index.unwrap().?; const tv: TypedValue = .{ .ty = o.dg.decl.?.ty, .val = o.dg.decl.?.val, @@ -2540,13 +2565,13 @@ pub fn genFunc(f: *Function) !void { const is_global = o.dg.declIsGlobal(tv); const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, 0); + try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); try o.indent_writer.insertNewline(); if (!is_global) try o.writer().writeAll("static "); - try o.dg.renderFunctionSignature(o.writer(), .Complete, 0); + try o.dg.renderFunctionSignature(o.writer(), decl_index, .complete, .{ .export_index = 0 }); try o.writer().writeByte(' '); // In case we need to use the header, populate it with a copy of the function @@ -2600,9 +2625,9 @@ pub fn genFunc(f: *Function) !void { w, local.ty, .{ .local = local_index }, - .mut, + .{}, local.alignment, - .Complete, + .complete, ); try w.writeAll(";\n "); } @@ -2628,7 +2653,7 @@ pub fn genDecl(o: *Object) !void { if 
(tv.val.tag() == .extern_fn) { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); - try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, 0); + try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); } else if (tv.val.castTag(.variable)) |var_payload| { @@ -2639,7 +2664,7 @@ pub fn genDecl(o: *Object) !void { try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); if (variable.is_threadlocal) try fwd_decl_writer.writeAll("zig_threadlocal "); - try o.dg.renderTypeAndName(fwd_decl_writer, decl.ty, decl_c_value, .mut, decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, decl.ty, decl_c_value, .{}, decl.@"align", .complete); try fwd_decl_writer.writeAll(";\n"); try genExports(o); @@ -2649,7 +2674,7 @@ pub fn genDecl(o: *Object) !void { if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .mut, decl.@"align", .Complete); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); if (decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); @@ -2660,13 +2685,13 @@ pub fn genDecl(o: *Object) !void { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .@"const", decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.@"align", .complete); try fwd_decl_writer.writeAll(";\n"); const w = o.writer(); if (!is_global) try w.writeAll("static "); if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .@"const", decl.@"align", .Complete); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.@"align", .complete); if (decl.@"linksection" != null) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); @@ -2689,7 +2714,7 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { const is_global = dg.declIsGlobal(tv); if (is_global) { try writer.writeAll("zig_extern "); - try dg.renderFunctionSignature(writer, .Complete, 0); + try dg.renderFunctionSignature(writer, dg.decl_index.unwrap().?, .complete, .{ .export_index = 0 }); try dg.fwd_decl.appendSlice(";\n"); } }, @@ -2879,10 +2904,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .dbg_block_begin, .dbg_block_end, - => CValue{ .none = {} }, + => .none, .call => try airCall(f, inst, .auto), - .call_always_tail => try airCall(f, inst, .always_tail), + .call_always_tail => .call_always_tail, .call_never_tail => try airCall(f, inst, .never_tail), .call_never_inline => try airCall(f, inst, .never_inline), @@ -3199,9 +3224,12 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { return CValue{ .undef = inst_ty }; } - const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); - const local = try f.allocAlignedLocal(elem_type, mutability, inst_ty.ptrAlignment(target)); + const local = try 
f.allocAlignedLocal( + elem_type, + CQualifiers.init(.{ .@"const" = inst_ty.isConstPtr() }), + inst_ty.ptrAlignment(target), + ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; try f.allocs.put(gpa, local.new_local, false); @@ -3216,9 +3244,12 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { return CValue{ .undef = inst_ty }; } - const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); - const local = try f.allocAlignedLocal(elem_ty, mutability, inst_ty.ptrAlignment(target)); + const local = try f.allocAlignedLocal( + elem_ty, + CQualifiers.init(.{ .@"const" = inst_ty.isConstPtr() }), + inst_ty.ptrAlignment(target), + ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; try f.allocs.put(gpa, local.new_local, false); @@ -3336,10 +3367,19 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); - if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { - var deref = is_ptr; + const is_naked = if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() == .Naked else false; + const peek_operand = f.value_map.get(un_op); + if (if (peek_operand) |operand| operand == .call_always_tail else false) { + try reap(f, inst, &.{un_op}); + if (is_naked) { + try f.writeCValue(writer, peek_operand.?, .Other); + unreachable; + } + _ = try airCall(f, Air.refToIndex(un_op).?, .always_tail); + } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); + var deref = is_ptr; const is_array = lowersToArray(ret_ty, target); const ret_val = if (is_array) ret_val: { const array_local = try f.allocLocal(inst, try lowered_ret_ty.copy(f.arena.allocator())); @@ -3368,9 +3408,8 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } } else { try reap(f, inst, &.{un_op}); - if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() != .Naked) - // Not even allowed to return void in a naked function. - try writer.writeAll("return;\n"); + // Not even allowed to return void in a naked function. 
+ if (!is_naked) try writer.writeAll("return;\n"); } return CValue.none; } @@ -4004,13 +4043,6 @@ fn airCall( const target = module.getTarget(); const writer = f.object.writer(); - switch (modifier) { - .auto => {}, - .always_tail => return f.fail("TODO: C backend: call with always_tail attribute", .{}), - .never_tail => return f.fail("TODO: C backend: call with never_tail attribute", .{}), - .never_inline => return f.fail("TODO: C backend: call with never_inline attribute", .{}), - else => unreachable, - } const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]); @@ -4060,7 +4092,10 @@ fn airCall( var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); - const result_local: CValue = if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) + const result_local: CValue = if (modifier == .always_tail) r: { + try writer.writeAll("zig_always_tail return "); + break :r .none; + } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) .none else if (f.liveness.isUnused(inst)) r: { try writer.writeByte('('); @@ -4074,26 +4109,33 @@ fn airCall( break :r local; }; - var is_extern = false; - var name: [*:0]const u8 = ""; callee: { known: { const fn_decl = fn_decl: { const callee_val = f.air.value(pl_op.operand) orelse break :known; break :fn_decl switch (callee_val.tag()) { - .extern_fn => blk: { - is_extern = true; - break :blk callee_val.castTag(.extern_fn).?.data.owner_decl; - }, + .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, .decl_ref => callee_val.castTag(.decl_ref).?.data, else => break :known, }; }; - name = module.declPtr(fn_decl).name; - try f.object.dg.renderDeclName(writer, fn_decl, 0); + switch (modifier) { + .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0), + inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName( + @unionInit(LazyFnKey, @tagName(mod), fn_decl), + @unionInit(LazyFnValue.Data, @tagName(mod), {}), + )), + else => unreachable, + } break :callee; } + switch (modifier) { + .auto, .always_tail => {}, + .never_tail => return f.fail("CBE: runtime callee with never_tail attribute unsupported", .{}), + .never_inline => return f.fail("CBE: runtime callee with never_inline attribute unsupported", .{}), + else => unreachable, + } // Fall back to function pointer call. try f.writeCValue(writer, callee, .Other); } @@ -4704,14 +4746,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); - try f.object.dg.renderTypeAndName( - writer, - output_ty, - local_value, - .mut, - alignment, - .Complete, - ); + try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete); try writer.writeAll(" __asm(\""); try writer.writeAll(constraint["={".len .. 
constraint.len - "}".len]); try writer.writeAll("\")"); @@ -4743,14 +4778,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (is_reg) try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(input_ty, alignment); - try f.object.dg.renderTypeAndName( - writer, - input_ty, - local_value, - .@"const", - alignment, - .Complete, - ); + try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete); if (is_reg) { try writer.writeAll(" __asm(\""); try writer.writeAll(constraint["{".len .. constraint.len - "}".len]); @@ -6278,7 +6306,9 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.print(" = {s}(", .{try f.getTagNameFn(enum_ty)}); + try writer.print(" = {s}(", .{ + try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl() }, .{ .tag_name = enum_ty }), + }); try f.writeCValue(writer, operand, .Other); try writer.writeAll(");\n"); diff --git a/src/link/C.zig b/src/link/C.zig index 262e4e4923..5663ba71e2 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -247,8 +247,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) const abi_define = abiDefine(comp); - // Covers defines, zig.h, ctypes, asm, lazy fwd, lazy code. - try f.all_buffers.ensureUnusedCapacity(gpa, 6); + // Covers defines, zig.h, ctypes, asm, lazy fwd. + try f.all_buffers.ensureUnusedCapacity(gpa, 5); if (abi_define) |buf| f.appendBufAssumeCapacity(buf); f.appendBufAssumeCapacity(zig_h); @@ -263,8 +263,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) f.appendBufAssumeCapacity(asm_buf.items); } - const lazy_indices = f.all_buffers.items.len; - f.all_buffers.items.len += 2; + const lazy_index = f.all_buffers.items.len; + f.all_buffers.items.len += 1; try self.flushErrDecls(&f.lazy_db); @@ -297,6 +297,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) { // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes. + // This ensures that every lazy CType.Index exactly matches the global CType.Index. assert(f.ctypes.count() == 0); try self.flushCTypes(&f, .none, f.lazy_db.ctypes); @@ -305,30 +306,22 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) try self.flushCTypes(&f, entry.key_ptr.toOptional(), entry.value_ptr.ctypes); } - { - f.all_buffers.items[lazy_indices + 0] = .{ - .iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "", - .iov_len = f.lazy_db.fwd_decl.items.len, - }; - f.file_size += f.lazy_db.fwd_decl.items.len; - - f.all_buffers.items[lazy_indices + 1] = .{ - .iov_base = if (f.lazy_db.code.items.len > 0) f.lazy_db.code.items.ptr else "", - .iov_len = f.lazy_db.code.items.len, - }; - f.file_size += f.lazy_db.code.items.len; - } - f.all_buffers.items[ctypes_index] = .{ .iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "", .iov_len = f.ctypes_buf.items.len, }; f.file_size += f.ctypes_buf.items.len; + f.all_buffers.items[lazy_index] = .{ + .iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "", + .iov_len = f.lazy_db.fwd_decl.items.len, + }; + f.file_size += f.lazy_db.fwd_decl.items.len; + // Now the code. 
- try f.all_buffers.ensureUnusedCapacity(gpa, decl_values.len); - for (decl_values) |decl| - f.appendBufAssumeCapacity(decl.code.items); + try f.all_buffers.ensureUnusedCapacity(gpa, 1 + decl_values.len); + f.appendBufAssumeCapacity(f.lazy_db.code.items); + for (decl_values) |decl| f.appendBufAssumeCapacity(decl.code.items); const file = self.base.file.?; try file.setEndPos(f.file_size); diff --git a/src/target.zig b/src/target.zig index 001adad7c2..6d6933e9e7 100644 --- a/src/target.zig +++ b/src/target.zig @@ -723,6 +723,7 @@ pub fn supportsFunctionAlignment(target: std.Target) bool { pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool { switch (backend) { .stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target), + .stage2_c => return true, else => return false, } } -- cgit v1.2.3 From 9d24d0354f83a7cd706726d47a2dc9ac092304e7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 18:47:09 -0500 Subject: CBE: fix MSVC diagnostics generated by the behavior tests After this, the last MSVC warnings are in behavior/bugs/529.zig: behavior.c(37971): warning C4133: 'function': incompatible types - from 'A__8479 *' to 'A__8474 *' behavior.c(37974): warning C4133: 'function': incompatible types - from 'A__8480 *' to 'A__8474 *' --- lib/zig.h | 64 ++++++++++++++----------- src/codegen/c.zig | 140 +++++++++++++++++++++++++++--------------------------- 2 files changed, 106 insertions(+), 98 deletions(-) (limited to 'src/codegen') diff --git a/lib/zig.h b/lib/zig.h index fb0573a049..c10720d1bd 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -1076,7 +1076,7 @@ static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n, \ static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ int##w##_t res; \ - if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ + if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \ return lhs < INT##w##_C(0) ? 
zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ @@ -2410,39 +2410,47 @@ zig_msvc_atomics(i64, int64_t, 64) #define zig_msvc_flt_atomics(Type, ReprType, suffix) \ static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ - ReprType comparand = *((ReprType*)expected); \ - ReprType initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, *((ReprType*)&desired), comparand); \ - bool exchanged = initial == comparand; \ - if (!exchanged) { \ - *expected = *((zig_##Type*)&initial); \ - } \ - return exchanged; \ + ReprType exchange; \ + ReprType comparand; \ + ReprType initial; \ + bool success; \ + memcpy(&comparand, expected, sizeof(comparand)); \ + memcpy(&exchange, &desired, sizeof(exchange)); \ + initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \ + success = initial == comparand; \ + if (!success) memcpy(expected, &initial, sizeof(*expected)); \ + return success; \ } \ static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - ReprType initial = _InterlockedExchange##suffix((ReprType volatile*)obj, *((ReprType*)&value)); \ - return *((zig_##Type*)&initial); \ + ReprType repr; \ + ReprType initial; \ + zig_##Type result; \ + memcpy(&repr, &value, sizeof(repr)); \ + initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \ + memcpy(&result, &initial, sizeof(result)); \ + return result; \ } \ static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - bool success = false; \ - ReprType new; \ - zig_##Type prev; \ - while (!success) { \ - prev = *obj; \ - new = prev + value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \ - } \ - return prev; \ + ReprType repr; \ + zig_##Type expected; \ + zig_##Type desired; \ + repr = *(ReprType volatile*)obj; \ + memcpy(&expected, &repr, sizeof(expected)); \ + do { \ + desired = expected + value; \ + } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \ + return expected; \ } \ static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - bool success = false; \ - ReprType new; \ - zig_##Type prev; \ - while (!success) { \ - prev = *obj; \ - new = prev - value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \ - } \ - return prev; \ + ReprType repr; \ + zig_##Type expected; \ + zig_##Type desired; \ + repr = *(ReprType volatile*)obj; \ + memcpy(&expected, &repr, sizeof(expected)); \ + do { \ + desired = expected - value; \ + } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \ + return expected; \ } zig_msvc_flt_atomics(f32, uint32_t, ) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4d42758773..5faf7e0b60 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -882,14 +882,14 @@ pub const DeclGen = struct { var literal = stringLiteral(writer); try literal.start(); const c_len = ty.arrayLenIncludingSentinel(); - var index: usize = 0; + var index: u64 = 0; while (index < c_len) : (index += 1) try literal.writeChar(0xaa); return literal.end(); } else { try writer.writeByte('{'); const c_len = ty.arrayLenIncludingSentinel(); - var index: usize = 0; + var index: u64 = 0; while (index < c_len) : (index += 1) { if (index > 0) try writer.writeAll(", "); try dg.renderValue(writer, ty.childType(), val, initializer_type); @@ -1089,8 +1089,8 @@ pub const DeclGen = struct { // First try specific tag representations for more 
efficiency. switch (val.tag()) { .undef, .empty_struct_value, .empty_array => { - try writer.writeByte('{'); const ai = ty.arrayInfo(); + try writer.writeByte('{'); if (ai.sentinel) |s| { try dg.renderValue(writer, ai.elem_type, s, initializer_type); } else { @@ -1098,13 +1098,19 @@ pub const DeclGen = struct { } try writer.writeByte('}'); }, - .bytes => { - try writer.print("{s}", .{fmtStringLiteral(val.castTag(.bytes).?.data)}); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try writer.print("{s}", .{fmtStringLiteral(bytes)}); + .bytes, .str_lit => |t| { + const bytes = switch (t) { + .bytes => val.castTag(.bytes).?.data, + .str_lit => bytes: { + const str_lit = val.castTag(.str_lit).?.data; + break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + }, + else => unreachable, + }; + const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null; + try writer.print("{s}", .{ + fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), + }); }, else => { // Fall back to generic implementation. @@ -1128,7 +1134,7 @@ pub const DeclGen = struct { } if (ai.sentinel) |s| { const s_u8 = @intCast(u8, s.toUnsignedInt(target)); - try literal.writeChar(s_u8); + if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); } else { @@ -1638,6 +1644,11 @@ pub const DeclGen = struct { try context.writeValue(dg, w, src_ty, location); } else if (dest_bits <= 64 and src_bits > 64) { assert(!src_is_ptr); + if (dest_bits < 64) { + try w.writeByte('('); + try dg.renderType(w, dest_ty); + try w.writeByte(')'); + } try w.writeAll("zig_lo_"); try dg.renderTypeForBuiltinFnName(w, src_eff_ty); try w.writeByte('('); @@ -2380,9 +2391,7 @@ pub fn genTypeDecl( pub fn genGlobalAsm(mod: *Module, writer: anytype) !void { var it = mod.global_assembly.valueIterator(); - while (it.next()) |asm_source| { - try writer.print("__asm({s});\n", .{fmtStringLiteral(asm_source.*)}); - } + while (it.next()) |asm_source| try writer.print("__asm({s});\n", .{fmtStringLiteral(asm_source.*, null)}); } pub fn genErrDecls(o: *Object) !void { @@ -2400,22 +2409,20 @@ pub fn genErrDecls(o: *Object) !void { o.indent_writer.popIndent(); try writer.writeAll("};\n"); - const name_prefix = "zig_errorName"; - const name_buf = try o.dg.gpa.alloc(u8, name_prefix.len + "_".len + max_name_len + 1); + const array_identifier = "zig_errorName"; + const name_prefix = array_identifier ++ "_"; + const name_buf = try o.dg.gpa.alloc(u8, name_prefix.len + max_name_len); defer o.dg.gpa.free(name_buf); - std.mem.copy(u8, name_buf, name_prefix ++ "_"); + std.mem.copy(u8, name_buf, name_prefix); for (o.dg.module.error_name_list.items) |name| { - std.mem.copy(u8, name_buf[name_prefix.len + "_".len ..], name); - name_buf[name_prefix.len + "_".len + name.len] = 0; - - const identifier = name_buf[0 .. name_prefix.len + "_".len + name.len :0]; - const name_z = identifier[name_prefix.len + "_".len ..]; + std.mem.copy(u8, name_buf[name_prefix.len..], name); + const identifier = name_buf[0 .. 
name_prefix.len + name.len]; var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; const name_ty = Type.initPayload(&name_ty_pl.base); - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name_z }; + var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); try writer.writeAll("static "); @@ -2432,7 +2439,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = Type.initPayload(&name_array_ty_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, Const, 0, .complete); + try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); try writer.writeAll(" = {"); for (o.dg.module.error_name_list.items, 0..) |name, value| { if (value != 0) try writer.writeByte(','); @@ -2440,7 +2447,7 @@ pub fn genErrDecls(o: *Object) !void { var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; const len_val = Value.initPayload(&len_pl.base); - try writer.print("{{" ++ name_prefix ++ "_{}, {}}}", .{ + try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val), }); } @@ -2457,8 +2464,8 @@ fn genExports(o: *Object) !void { try fwd_decl_writer.writeAll("zig_export("); try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(exports.items[0].options.name), - fmtStringLiteral(@"export".options.name), + fmtStringLiteral(exports.items[0].options.name, null), + fmtStringLiteral(@"export".options.name, null), }); } } @@ -2483,10 +2490,6 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); for (enum_ty.enumFields().keys(), 0..) |name, index| { - const name_z = try o.dg.gpa.dupeZ(u8, name); - defer o.dg.gpa.free(name_z); - const name_bytes = name_z[0 .. 
name_z.len + 1]; - var tag_pl: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, index), @@ -2499,7 +2502,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; const name_ty = Type.initPayload(&name_ty_pl.base); - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name_bytes }; + var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; @@ -3459,15 +3462,17 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); + if (dest_c_bits < 64) { + try writer.writeByte('('); + try f.renderType(writer, inst_ty); + try writer.writeByte(')'); + } + const needs_lo = operand_int_info.bits > 64 and dest_bits <= 64; if (needs_lo) { try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty); try writer.writeByte('('); - } else if (dest_c_bits <= 64) { - try writer.writeByte('('); - try f.renderType(writer, inst_ty); - try writer.writeByte(')'); } if (dest_bits >= 8 and std.math.isPowerOfTwo(dest_bits)) { @@ -4228,8 +4233,9 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try genBodyInner(f, body); try f.object.indent_writer.insertNewline(); + // label might be unused, add a dummy goto // label must be followed by an expression, add an empty one. - try writer.print("zig_block_{d}:;\n", .{block_id}); + try writer.print("goto zig_block_{d};\nzig_block_{d}: (void)0;\n", .{ block_id, block_id }); return result; } @@ -4608,8 +4614,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const last_case_i = switch_br.data.cases_len - @boolToInt(switch_br.data.else_body_len == 0); var extra_index: usize = switch_br.end; - var case_i: u32 = 0; - while (case_i < switch_br.data.cases_len) : (case_i += 1) { + for (0..switch_br.data.cases_len) |case_i| { const case = f.air.extraData(Air.SwitchBr.Case, extra_index); const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]); const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len]; @@ -4789,14 +4794,11 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); } } - { - var clobber_i: u32 = 0; - while (clobber_i < clobbers_len) : (clobber_i += 1) { - const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(f.air.extra[extra_i..]), 0); - // This equation accounts for the fact that even if we have exactly 4 bytes - // for the string, we still use the next u32 for the null terminator. - extra_i += clobber.len / 4 + 1; - } + for (0..clobbers_len) |_| { + const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(f.air.extra[extra_i..]), 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. 
+ extra_i += clobber.len / 4 + 1; } { @@ -4851,7 +4853,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("__asm"); if (is_volatile) try writer.writeAll(" volatile"); - try writer.print("({s}", .{fmtStringLiteral(fixed_asm_source[0..dst_i])}); + try writer.print("({s}", .{fmtStringLiteral(fixed_asm_source[0..dst_i], null)}); } extra_i = constraints_extra_begin; @@ -4869,7 +4871,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(' '); if (!std.mem.eql(u8, name, "_")) try writer.print("[{s}]", .{name}); const is_reg = constraint[1] == '{'; - try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "=r" else constraint)}); + try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "=r" else constraint, null)}); if (is_reg) { try f.writeCValue(writer, .{ .local = locals_index }, .Other); locals_index += 1; @@ -4895,7 +4897,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[0] == '{'; const input_val = try f.resolveInst(input); - try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "r" else constraint)}); + try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "r" else constraint, null)}); try f.writeCValue(writer, if (asmInputNeedsLocal(constraint, input_val)) local: { const input_local = CValue{ .local = locals_index }; locals_index += 1; @@ -4904,19 +4906,16 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); } try writer.writeByte(':'); - { - var clobber_i: u32 = 0; - while (clobber_i < clobbers_len) : (clobber_i += 1) { - const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(f.air.extra[extra_i..]), 0); - // This equation accounts for the fact that even if we have exactly 4 bytes - // for the string, we still use the next u32 for the null terminator. - extra_i += clobber.len / 4 + 1; + for (0..clobbers_len) |clobber_i| { + const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(f.air.extra[extra_i..]), 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. 
+ extra_i += clobber.len / 4 + 1; - if (clobber.len == 0) continue; + if (clobber.len == 0) continue; - if (clobber_i > 0) try writer.writeByte(','); - try writer.print(" {s}", .{fmtStringLiteral(clobber)}); - } + if (clobber_i > 0) try writer.writeByte(','); + try writer.print(" {s}", .{fmtStringLiteral(clobber, null)}); } try writer.writeAll(");\n"); @@ -5340,8 +5339,9 @@ fn fieldPtr( try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)}); }, .end => { + try writer.writeByte('('); try f.writeCValue(writer, container_ptr_val, .Other); - try writer.print(" + {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)}); }, } @@ -6448,10 +6448,9 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { // // Equivalent to: // reduce: { - // var i: usize = 0; // var accum: T = init; - // while (i < vec.len) : (i += 1) { - // accum = func(accum, vec[i]); + // for (vec) : (elem) { + // accum = func(accum, elem); // } // break :reduce accum; // } @@ -7162,8 +7161,9 @@ fn stringLiteral(child_stream: anytype) StringLiteral(@TypeOf(child_stream)) { return .{ .counting_writer = std.io.countingWriter(child_stream) }; } +const FormatStringContext = struct { str: []const u8, sentinel: ?u8 }; fn formatStringLiteral( - str: []const u8, + data: FormatStringContext, comptime fmt: []const u8, _: std.fmt.FormatOptions, writer: anytype, @@ -7172,13 +7172,13 @@ fn formatStringLiteral( var literal = stringLiteral(writer); try literal.start(); - for (str) |c| - try literal.writeChar(c); + for (data.str) |c| try literal.writeChar(c); + if (data.sentinel) |sentinel| if (sentinel != 0) try literal.writeChar(sentinel); try literal.end(); } -fn fmtStringLiteral(str: []const u8) std.fmt.Formatter(formatStringLiteral) { - return .{ .data = str }; +fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStringLiteral) { + return .{ .data = .{ .str = str, .sentinel = sentinel } }; } fn undefPattern(comptime IntType: type) IntType { -- cgit v1.2.3 From 8ccdc74949e361b211dade90184ecb160c9340d8 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 19:40:50 -0500 Subject: CType: cleanup --- src/codegen/c/type.zig | 259 +++++++++++++++++++++++-------------------------- 1 file changed, 124 insertions(+), 135 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 04c0ea3003..bd4b6d9a8d 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1056,7 +1056,7 @@ pub const CType = extern union { } }, - .Struct, .Union => |zig_tag| if (ty.containerLayout() == .Packed) { + .Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) { if (ty.castTag(.@"struct")) |struct_obj| { try self.initType(struct_obj.data.backing_int_ty, kind, lookup); } else { @@ -1068,9 +1068,13 @@ pub const CType = extern union { } } else if (ty.isTupleOrAnonStruct()) { if (lookup.isMutable()) { - for (0..ty.structFieldCount()) |field_i| { + for (0..switch (zig_ty_tag) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }) |field_i| { const field_ty = ty.structFieldType(field_i); - if (ty.structFieldIsComptime(field_i) or + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or !field_ty.hasRuntimeBitsIgnoreComptime()) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .forward_parameter => .forward, @@ -1086,14 +1090,22 @@ pub const CType = extern union { } } self.init(switch 
(kind) { - .forward, .forward_parameter => .fwd_anon_struct, - .complete, .parameter, .global => .anon_struct, + .forward, .forward_parameter => switch (zig_ty_tag) { + .Struct => .fwd_anon_struct, + .Union => .fwd_anon_union, + else => unreachable, + }, + .complete, .parameter, .global => switch (zig_ty_tag) { + .Struct => .anon_struct, + .Union => .anon_union, + else => unreachable, + }, .payload => unreachable, }); } else { const tag_ty = ty.unionTagTypeSafety(); const is_tagged_union_wrapper = kind != .payload and tag_ty != null; - const is_struct = zig_tag == .Struct or is_tagged_union_wrapper; + const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper; switch (kind) { .forward, .forward_parameter => { self.storage = .{ .fwd = .{ @@ -1138,7 +1150,7 @@ pub const CType = extern union { self.init(.void); } else { var is_packed = false; - for (0..switch (zig_tag) { + for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), else => unreachable, @@ -1181,10 +1193,10 @@ pub const CType = extern union { } }, - .Array, .Vector => |zig_tag| { + .Array, .Vector => |zig_ty_tag| { switch (kind) { .forward, .complete, .global => { - const t: Tag = switch (zig_tag) { + const t: Tag = switch (zig_ty_tag) { .Array => .array, .Vector => .vector, else => unreachable, @@ -1501,120 +1513,88 @@ pub const CType = extern union { .@"union", .packed_struct, .packed_union, - => switch (ty.zigTypeTag()) { - .Struct => { - const fields_len = ty.structFieldCount(); - - var c_fields_len: usize = 0; - for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if (ty.structFieldIsComptime(field_i) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; - c_fields_len += 1; - } - - const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); - var c_field_i: usize = 0; - for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if (ty.structFieldIsComptime(field_i) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; - - fields_pl[c_field_i] = .{ - .name = try if (ty.isSimpleTuple()) - std.fmt.allocPrintZ(arena, "f{}", .{field_i}) - else - arena.dupeZ(u8, ty.structFieldName(field_i)), - .type = store.set.typeToIndex(field_ty, target, switch (kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - }).?, - .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), - }; - c_field_i += 1; - } - - switch (t) { - .fwd_anon_struct => { - const anon_pl = try arena.create(Payload.Fields); - anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl }; - return initPayload(anon_pl); - }, - - .anon_struct, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => { - const struct_pl = try arena.create(Payload.Aggregate); - struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, - } }; - return initPayload(struct_pl); - }, + => { + const zig_ty_tag = ty.zigTypeTag(); + const fields_len = switch (zig_ty_tag) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }; - else => unreachable, - } - }, + var c_fields_len: usize = 0; + for (0..fields_len) |field_i| { + const field_ty = ty.structFieldType(field_i); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + c_fields_len += 1; + } - .Union => { - const union_fields = 
ty.unionFields(); - const fields_len = union_fields.count(); + const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); + var c_field_i: usize = 0; + for (0..fields_len) |field_i| { + const field_ty = ty.structFieldType(field_i); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + defer c_field_i += 1; + fields_pl[c_field_i] = .{ + .name = try if (ty.isSimpleTuple()) + std.fmt.allocPrintZ(arena, "f{}", .{field_i}) + else + arena.dupeZ(u8, switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i), + .Union => ty.unionFields().keys()[field_i], + else => unreachable, + }), + .type = store.set.typeToIndex(field_ty, target, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter, .payload => .complete, + .global => .global, + }).?, + .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), + }; + } - var c_fields_len: usize = 0; - for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - c_fields_len += 1; - } + switch (t) { + .fwd_anon_struct, + .fwd_anon_union, + => { + const anon_pl = try arena.create(Payload.Fields); + anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl }; + return initPayload(anon_pl); + }, - const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); - var field_i: usize = 0; - var c_field_i: usize = 0; - var field_it = union_fields.iterator(); - while (field_it.next()) |field| { - defer field_i += 1; - if (!field.value_ptr.ty.hasRuntimeBitsIgnoreComptime()) continue; - - fields_pl[c_field_i] = .{ - .name = try arena.dupeZ(u8, field.key_ptr.*), - .type = store.set.typeToIndex(field.value_ptr.ty, target, switch (kind) { - .forward, .forward_parameter => unreachable, - .complete, .parameter, .payload => .complete, - .global => .global, - }).?, - .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), - }; - c_field_i += 1; - } + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => { + const unnamed_pl = try arena.create(Payload.Unnamed); + unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .owner_decl = ty.getOwnerDecl(), + .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable, + } }; + return initPayload(unnamed_pl); + }, - switch (kind) { - .forward, .forward_parameter => unreachable, - .complete, .parameter, .global => { - const union_pl = try arena.create(Payload.Aggregate); - union_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, - } }; - return initPayload(union_pl); - }, - .payload => if (ty.unionTagTypeSafety()) |_| { - const union_pl = try arena.create(Payload.Unnamed); - union_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .owner_decl = ty.getOwnerDecl(), - .id = 0, - } }; - return initPayload(union_pl); - } else unreachable, - } - }, + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const struct_pl = try arena.create(Payload.Aggregate); + struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(struct_pl); + }, - else => unreachable, + else => unreachable, + } }, .function, @@ -1710,14 +1690,19 @@ pub const CType = extern union { ]u8 = undefined; const c_fields = cty.cast(Payload.Fields).?.data; + const 
zig_ty_tag = ty.zigTypeTag(); var c_field_i: usize = 0; - for (0..ty.structFieldCount()) |field_i| { + for (0..switch (zig_ty_tag) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }) |field_i| { const field_ty = ty.structFieldType(field_i); - if (ty.structFieldIsComptime(field_i) or + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + defer c_field_i += 1; const c_field = &c_fields[c_field_i]; - c_field_i += 1; if (!self.eqlRecurse(field_ty, c_field.type, switch (self.kind) { .forward, .forward_parameter => .forward, @@ -1728,8 +1713,11 @@ pub const CType = extern union { u8, if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else - ty.structFieldName(field_i), + else switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i), + .Union => ty.unionFields().keys()[field_i], + else => unreachable, + }, mem.span(c_field.name), ) or Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align" != c_field.alignas.@"align") return false; @@ -1828,29 +1816,30 @@ pub const CType = extern union { var name_buf: [ std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; + + const zig_ty_tag = ty.zigTypeTag(); for (0..switch (ty.zigTypeTag()) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); - if (ty.structFieldIsComptime(field_i) or + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or !field_ty.hasRuntimeBitsIgnoreComptime()) continue; - self.updateHasherRecurse( - hasher, - ty.structFieldType(field_i), - switch (self.kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - }, - ); + self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + }); hasher.update(if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else - ty.structFieldName(field_i)); + else switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i), + .Union => ty.unionFields().keys()[field_i], + else => unreachable, + }); autoHash( hasher, Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align", -- cgit v1.2.3 From 3a1cb62317073b8e599e604b74edf9d10f16d4a2 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 20:11:38 -0500 Subject: CBE: delete stage1 hacks --- src/codegen/c.zig | 212 +++++++++++++++++++++++++++--------------------------- 1 file changed, 106 insertions(+), 106 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 5faf7e0b60..7b09d489d1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -309,7 +309,7 @@ pub const Function = struct { const val = f.air.value(inst).?; const ty = f.air.typeOf(inst); - const result = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: { + const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: { const writer = f.object.code_header.writer(); const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); @@ -321,7 +321,7 @@ pub const Function = struct { try f.object.dg.renderValue(writer, ty, val, .StaticInitializer); try writer.writeAll(";\n "); break :result decl_c_value; - } else CValue{ .constant = inst }; + } 
else .{ .constant = inst }; gop.value_ptr.* = result; return result; @@ -346,7 +346,7 @@ pub const Function = struct { .alignment = alignment, .loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1), }); - return CValue{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; + return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { @@ -363,7 +363,7 @@ pub const Function = struct { if (local.alignment >= alignment) { local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1); _ = locals_list.swapRemove(i); - return CValue{ .new_local = local_index }; + return .{ .new_local = local_index }; } } } @@ -545,7 +545,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) { - return dg.writeCValue(writer, CValue{ .undef = ty }); + return dg.writeCValue(writer, .{ .undef = ty }); } // Chase function values in order to be able to reference the original function. @@ -2649,7 +2649,7 @@ pub fn genDecl(o: *Object) !void { defer tracy.end(); const decl = o.dg.decl.?; - const decl_c_value: CValue = .{ .decl = o.dg.decl_index.unwrap().? }; + const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? }; const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; @@ -3011,7 +3011,7 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -3037,7 +3037,7 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { !inst_ty.hasRuntimeBitsIgnoreComptime()) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const ptr = try f.resolveInst(bin_op.lhs); @@ -3076,7 +3076,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -3117,7 +3117,7 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { !inst_ty.hasRuntimeBitsIgnoreComptime()) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const slice = try f.resolveInst(bin_op.lhs); @@ -3156,7 +3156,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const slice_ty = f.air.typeOf(bin_op.lhs); @@ -3186,7 +3186,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); if (f.liveness.isUnused(inst) or !inst_ty.hasRuntimeBitsIgnoreComptime()) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const array = try f.resolveInst(bin_op.lhs); @@ -3224,7 +3224,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const elem_type = inst_ty.elemType(); if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) { - return CValue{ .undef = inst_ty }; + return .{ .undef = inst_ty }; } const target = f.object.dg.module.getTarget(); @@ -3236,7 +3236,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = 
f.object.dg.module.gpa; try f.allocs.put(gpa, local.new_local, false); - return CValue{ .local_ref = local.new_local }; + return .{ .local_ref = local.new_local }; } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3244,7 +3244,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const elem_ty = inst_ty.elemType(); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { - return CValue{ .undef = inst_ty }; + return .{ .undef = inst_ty }; } const target = f.object.dg.module.getTarget(); @@ -3256,7 +3256,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; try f.allocs.put(gpa, local.new_local, false); - return CValue{ .local_ref = local.new_local }; + return .{ .local_ref = local.new_local }; } fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3280,7 +3280,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { (!ptr_info.@"volatile" and f.liveness.isUnused(inst))) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -3414,7 +3414,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { // Not even allowed to return void in a naked function. if (!is_naked) try writer.writeAll("return;\n"); } - return CValue.none; + return .none; } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3422,7 +3422,7 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -3443,7 +3443,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -3532,7 +3532,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -3555,7 +3555,7 @@ fn storeUndefined(f: *Function, lhs_child_ty: Type, dest_ptr: CValue) !CValue { try f.renderType(writer, lhs_child_ty); try writer.writeAll("));\n"); } - return CValue.none; + return .none; } fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3564,7 +3564,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { const ptr_info = f.air.typeOf(bin_op.lhs).ptrInfo().data; if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const ptr_val = try f.resolveInst(bin_op.lhs); @@ -3690,7 +3690,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, src_val, .Other); } try writer.writeAll(";\n"); - return CValue.none; + return .none; } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { @@ -3699,7 +3699,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const lhs = try f.resolveInst(bin_op.lhs); @@ -3755,7 +3755,7 @@ fn airNot(f: *Function, inst: 
Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const op = try f.resolveInst(ty_op.operand); @@ -3790,7 +3790,7 @@ fn airBinOp( try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; const inst_ty = f.air.typeOfIndex(inst); @@ -3813,7 +3813,7 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation: if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const operand_ty = f.air.typeOf(bin_op.lhs); @@ -3853,7 +3853,7 @@ fn airEquality( if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const operand_ty = f.air.typeOf(bin_op.lhs); @@ -3909,7 +3909,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -3930,7 +3930,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const lhs = try f.resolveInst(bin_op.lhs); @@ -3971,7 +3971,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -4010,7 +4010,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const ptr = try f.resolveInst(bin_op.lhs); @@ -4097,7 +4097,7 @@ fn airCall( var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); - const result_local: CValue = if (modifier == .always_tail) r: { + const result_local = if (modifier == .always_tail) r: { try writer.writeAll("zig_always_tail return "); break :r .none; } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) @@ -4187,7 +4187,7 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { // Perhaps an additional compilation option is in order? 
//try writer.print("#line {d}\n", .{dbg_stmt.line + 1}); try writer.print("/* file:{d}:{d} */\n", .{ dbg_stmt.line + 1, dbg_stmt.column + 1 }); - return CValue.none; + return .none; } fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4196,7 +4196,7 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const function = f.air.values[ty_pl.payload].castTag(.function).?.data; const mod = f.object.dg.module; try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); - return CValue.none; + return .none; } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4208,7 +4208,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{pl_op.operand}); const writer = f.object.writer(); try writer.print("/* var:{s} */\n", .{name}); - return CValue.none; + return .none; } fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4224,7 +4224,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else - CValue{ .none = {} }; + .none; try f.blocks.putNoClobber(f.object.dg.gpa, inst, .{ .block_id = block_id, @@ -4294,7 +4294,7 @@ fn lowerTry( if (!payload_has_bits) { if (!operand_is_ptr) { - return CValue.none; + return .none; } else { return err_union; } @@ -4303,7 +4303,7 @@ fn lowerTry( try reap(f, inst, &.{operand}); if (f.liveness.isUnused(inst)) { - return CValue.none; + return .none; } const target = f.object.dg.module.getTarget(); @@ -4359,7 +4359,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.print("goto zig_block_{d};\n", .{block.block_id}); - return CValue.none; + return .none; } fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4369,7 +4369,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { // https://github.com/ziglang/zig/issues/13410 if (f.liveness.isUnused(inst) or !dest_ty.hasRuntimeBits()) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -4441,11 +4441,11 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { fn airBreakpoint(writer: anytype) !CValue { try writer.writeAll("zig_breakpoint();\n"); - return CValue.none; + return .none; } fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; const writer = f.object.writer(); const local = try f.allocLocal(inst, Type.usize); try f.writeCValue(writer, local, .Other); @@ -4456,7 +4456,7 @@ fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFrameAddress(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; const writer = f.object.writer(); const local = try f.allocLocal(inst, Type.usize); try f.writeCValue(writer, local, .Other); @@ -4474,7 +4474,7 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { try writeMemoryOrder(writer, atomic_order); try writer.writeAll(");\n"); - return CValue.none; + return .none; } fn airUnreach(f: *Function) !CValue { @@ -4482,7 +4482,7 @@ fn airUnreach(f: *Function) !CValue { if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); - return CValue.none; + return .none; } fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4514,7 +4514,7 @@ fn airLoop(f: 
*Function, inst: Air.Inst.Index) !CValue { deinitFreeLocalsMap(gpa, new_free_locals); new_free_locals.* = old_free_locals.move(); - return CValue.none; + return .none; } fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4579,7 +4579,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); - return CValue.none; + return .none; } fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4691,7 +4691,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { f.object.indent_writer.popIndent(); try writer.writeAll("}\n"); - return CValue.none; + return .none; } fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool { @@ -4713,8 +4713,8 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; - const result: CValue = r: { - if (!is_volatile and f.liveness.isUnused(inst)) break :r CValue.none; + const result = r: { + if (!is_volatile and f.liveness.isUnused(inst)) break :r .none; const writer = f.object.writer(); const inst_ty = f.air.typeOfIndex(inst); @@ -4899,7 +4899,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const input_val = try f.resolveInst(input); try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "r" else constraint, null)}); try f.writeCValue(writer, if (asmInputNeedsLocal(constraint, input_val)) local: { - const input_local = CValue{ .local = locals_index }; + const input_local = .{ .local = locals_index }; locals_index += 1; break :local input_local; } else input_val, .Other); @@ -4932,7 +4932,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { try f.writeCValueDeref(writer, if (output == .none) - CValue{ .local_ref = local.new_local } + .{ .local_ref = local.new_local } else try f.resolveInst(output)); try writer.writeAll(" = "); @@ -4967,7 +4967,7 @@ fn airIsNull( if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const writer = f.object.writer(); @@ -5017,7 +5017,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -5028,7 +5028,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = opt_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -5069,7 +5069,7 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const writer = f.object.writer(); @@ -5080,7 +5080,7 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) { - return CValue{ .undef = inst_ty }; + return .{ .undef = inst_ty }; } const local = try f.allocLocal(inst, inst_ty); @@ -5112,7 +5112,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { if (opt_ty.optionalReprIsPayload()) { if (f.liveness.isUnused(inst)) { - return CValue.none; + return .none; } const local = try f.allocLocal(inst, inst_ty); // The payload and the optional are the same value. 
@@ -5129,7 +5129,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); if (f.liveness.isUnused(inst)) { - return CValue.none; + return .none; } const local = try f.allocLocal(inst, inst_ty); @@ -5232,7 +5232,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{extra.field_ptr}); - return CValue.none; + return .none; } const target = f.object.dg.module.getTarget(); @@ -5355,13 +5355,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{extra.struct_operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { try reap(f, inst, &.{extra.struct_operand}); - return CValue.none; + return .none; } const target = f.object.dg.module.getTarget(); @@ -5512,7 +5512,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -5549,7 +5549,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -5560,7 +5560,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { - if (!is_ptr) return CValue.none; + if (!is_ptr) return .none; const w = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5591,7 +5591,7 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -5637,7 +5637,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const writer = f.object.writer(); @@ -5691,7 +5691,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); @@ -5702,7 +5702,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { } fn airErrReturnTrace(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; return f.fail("TODO: C backend: implement airErrReturnTrace", .{}); } @@ -5720,7 +5720,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -5758,7 +5758,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + 
return .none; } const writer = f.object.writer(); @@ -5796,7 +5796,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -5812,7 +5812,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand == .undef) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.writeCValue(writer, CValue{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); } else if (array_ty.hasRuntimeBitsIgnoreComptime()) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); @@ -5834,7 +5834,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -5882,7 +5882,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const operand = try f.resolveInst(un_op); @@ -5910,7 +5910,7 @@ fn airUnBuiltinCall( if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -5942,7 +5942,7 @@ fn airBinBuiltinCall( if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const lhs = try f.resolveInst(bin_op.lhs); @@ -6065,7 +6065,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue if (f.liveness.isUnused(inst)) { try freeLocal(f, inst, local.new_local, 0); - return CValue.none; + return .none; } return local; @@ -6108,7 +6108,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try freeLocal(f, inst, local.new_local, 0); - return CValue.none; + return .none; } return local; @@ -6120,7 +6120,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{atomic_load.ptr}); const ptr_ty = f.air.typeOf(atomic_load.ptr); if (!ptr_ty.isVolatilePtr() and f.liveness.isUnused(inst)) { - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -6163,7 +6163,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); try writer.writeAll(");\n"); - return CValue.none; + return .none; } fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { @@ -6206,7 +6206,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs }); try freeLocal(f, inst, index.new_local, 0); - return CValue.none; + return .none; } try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs }); @@ -6218,7 +6218,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, len, .FunctionArgument); try writer.writeAll(");\n"); - return CValue.none; + return .none; } fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { @@ -6238,7 +6238,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, len, .FunctionArgument); try writer.writeAll(");\n"); - return CValue.none; + return .none; } fn airSetUnionTag(f: *Function, 
inst: Air.Inst.Index) !CValue { @@ -6251,7 +6251,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const union_ty = f.air.typeOf(bin_op.lhs).childType(); const target = f.object.dg.module.getTarget(); const layout = union_ty.unionGetLayout(target); - if (layout.tag_size == 0) return CValue.none; + if (layout.tag_size == 0) return .none; try writer.writeByte('('); try f.writeCValue(writer, union_ptr, .Other); @@ -6259,7 +6259,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, new_tag, .Other); try writer.writeAll(";\n"); - return CValue.none; + return .none; } fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { @@ -6267,7 +6267,7 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const operand = try f.resolveInst(ty_op.operand); @@ -6277,7 +6277,7 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const target = f.object.dg.module.getTarget(); const layout = un_ty.unionGetLayout(target); - if (layout.tag_size == 0) return CValue.none; + if (layout.tag_size == 0) return .none; const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); @@ -6295,7 +6295,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -6320,7 +6320,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const writer = f.object.writer(); @@ -6340,7 +6340,7 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ty_op.operand}); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); @@ -6356,13 +6356,13 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; return f.fail("TODO: C backend: implement airSelect", .{}); } fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; return f.fail("TODO: C backend: implement airShuffle", .{}); } @@ -6372,7 +6372,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{reduce.operand}); - return CValue.none; + return .none; } const target = f.object.dg.module.getTarget(); @@ -6545,7 +6545,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } } - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; const target = f.object.dg.module.getTarget(); @@ -6590,7 +6590,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const element_ty = f.air.typeOf(element); try f.writeCValue(writer, switch (element_ty.zigTypeTag()) { - .Array => CValue{ .undef = element_ty }, + .Array => .{ .undef = element_ty }, else => resolved_element, }, .Initializer); empty = false; @@ -6697,7 +6697,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{extra.init}); - return CValue.none; + return .none; } 
const union_ty = f.air.typeOfIndex(inst); @@ -6756,7 +6756,7 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { // The available prefetch intrinsics do not accept a cache argument; only // address, rw, and locality. So unless the cache is data, we do not lower // this instruction. - .instruction => return CValue.none, + .instruction => return .none, } const ptr = try f.resolveInst(prefetch.ptr); try reap(f, inst, &.{prefetch.ptr}); @@ -6766,11 +6766,11 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { try writer.print(", {d}, {d});\n", .{ @enumToInt(prefetch.rw), prefetch.locality, }); - return CValue.none; + return .none; } fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) return CValue.none; + if (f.liveness.isUnused(inst)) return .none; const pl_op = f.air.instructions.items(.data)[inst].pl_op; @@ -6807,7 +6807,7 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const operand = try f.resolveInst(un_op); @@ -6829,7 +6829,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal const un_op = f.air.instructions.items(.data)[inst].un_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{un_op}); - return CValue.none; + return .none; } const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -6851,7 +6851,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return CValue.none; + return .none; } const lhs = try f.resolveInst(bin_op.lhs); const rhs = try f.resolveInst(bin_op.rhs); @@ -6878,7 +6878,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data; if (f.liveness.isUnused(inst)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); - return CValue.none; + return .none; } const inst_ty = f.air.typeOfIndex(inst); const mulend1 = try f.resolveInst(bin_op.lhs); -- cgit v1.2.3 From c0671a92c7f200a3c32d03db7b7e342c3efdd1fe Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 20:12:02 -0500 Subject: CBE: simplify always_tail call logic It should be Sema's job to check this anyway. --- src/codegen/c.zig | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7b09d489d1..a7a069c193 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -54,8 +54,6 @@ pub const CValue = union(enum) { /// Render these bytes literally. /// TODO make this a [*:0]const u8 to save memory bytes: []const u8, - /// A deferred call_always_tail - call_always_tail: void, }; const BlockData = struct { @@ -1751,7 +1749,6 @@ pub const DeclGen = struct { fmtIdent(ident), }), .bytes => |bytes| return w.writeAll(bytes), - .call_always_tail => return dg.fail("CBE: the result of @call(.always_tail, ...) 
must be returned directly", .{}), } } @@ -1785,7 +1782,6 @@ pub const DeclGen = struct { try w.writeAll(bytes); return w.writeByte(')'); }, - .call_always_tail => return dg.writeCValue(w, c_value), } } @@ -1798,16 +1794,7 @@ pub const DeclGen = struct { fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .none, .constant, .field, .undef => unreachable, - .new_local, - .local, - .arg, - .arg_array, - .decl, - .identifier, - .payload_identifier, - .bytes, - .call_always_tail, - => { + .new_local, .local, .arg, .arg_array, .decl, .identifier, .payload_identifier, .bytes => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, @@ -2910,7 +2897,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, => .none, .call => try airCall(f, inst, .auto), - .call_always_tail => .call_always_tail, + .call_always_tail => .none, .call_never_tail => try airCall(f, inst, .never_tail), .call_never_inline => try airCall(f, inst, .never_inline), @@ -3365,20 +3352,15 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); const target = f.object.dg.module.getTarget(); + const op_inst = Air.refToIndex(un_op); const op_ty = f.air.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType() else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); - const is_naked = if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() == .Naked else false; - const peek_operand = f.value_map.get(un_op); - if (if (peek_operand) |operand| operand == .call_always_tail else false) { + if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) { try reap(f, inst, &.{un_op}); - if (is_naked) { - try f.writeCValue(writer, peek_operand.?, .Other); - unreachable; - } - _ = try airCall(f, Air.refToIndex(un_op).?, .always_tail); + _ = try airCall(f, op_inst.?, .always_tail); } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -3412,7 +3394,8 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } else { try reap(f, inst, &.{un_op}); // Not even allowed to return void in a naked function. - if (!is_naked) try writer.writeAll("return;\n"); + if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true) + try writer.writeAll("return;\n"); } return .none; } -- cgit v1.2.3 From 1f3d9f79c19c225b2043a1d0355cbc74addcf03a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 20:29:59 -0500 Subject: CBE: apply some maybe payload cleanups --- src/codegen/c.zig | 46 +++++++++++++++++----------------------------- 1 file changed, 17 insertions(+), 29 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a7a069c193..79e0bff237 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -5356,11 +5356,6 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { // Ensure complete type definition is visible before accessing fields. 
_ = try f.typeToIndex(struct_ty, .complete); - const extra_name: CValue = switch (struct_ty.tag()) { - .union_tagged, .union_safety_tagged => .{ .identifier = "payload" }, - else => .none, - }; - const field_name: CValue = switch (struct_ty.tag()) { .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { .Auto, .Extern => if (struct_ty.isSimpleTuple()) @@ -5458,31 +5453,29 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { } return local; - } else .{ - .identifier = struct_ty.unionFields().keys()[extra.field_index], + } else field_name: { + const name = struct_ty.unionFields().keys()[extra.field_index]; + break :field_name if (struct_ty.unionTagTypeSafety()) |_| + .{ .payload_identifier = name } + else + .{ .identifier = name }; }, else => unreachable, }; - const is_array = lowersToArray(inst_ty, target); const local = try f.allocLocal(inst, inst_ty); - if (is_array) { + if (lowersToArray(inst_ty, target)) { try writer.writeAll("memcpy("); try f.writeCValue(writer, local, .FunctionArgument); try writer.writeAll(", "); - } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - } - if (extra_name != .none) { - try f.writeCValueMember(writer, struct_byval, extra_name); - try writer.writeByte('.'); - try f.writeCValue(writer, field_name, .Other); - } else try f.writeCValueMember(writer, struct_byval, field_name); - if (is_array) { + try f.writeCValueMember(writer, struct_byval, field_name); try writer.writeAll(", sizeof("); try f.renderType(writer, inst_ty); try writer.writeAll("))"); + } else { + try f.writeCValue(writer, local, .Other); + try writer.writeAll(" = "); + try f.writeCValueMember(writer, struct_byval, field_name); } try writer.writeAll(";\n"); return local; @@ -6700,7 +6693,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { return local; } - if (union_ty.unionTagTypeSafety()) |tag_ty| { + const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: { const layout = union_ty.unionGetLayout(target); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name).?; @@ -6717,18 +6710,13 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.print(".tag = {}; ", .{try f.fmtIntLiteral(tag_ty, int_val)}); } - try f.writeCValue(writer, local, .Other); - try writer.print(".payload.{ } = ", .{fmtIdent(field_name)}); - try f.writeCValue(writer, payload, .Other); - try writer.writeAll(";\n"); - return local; - } + break :field .{ .payload_identifier = field_name }; + } else .{ .identifier = field_name }; - try f.writeCValue(writer, local, .Other); - try writer.print(".{ } = ", .{fmtIdent(field_name)}); + try f.writeCValueMember(writer, local, field); + try writer.writeAll(" = "); try f.writeCValue(writer, payload, .Other); try writer.writeAll(";\n"); - return local; } -- cgit v1.2.3 From f8aecef6705a75a4c35754bcac32c27602b84711 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 23 Feb 2023 21:18:26 -0500 Subject: CBE: implement the future Turns out f(...) will be supported one day. 
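
Context: C17 and earlier require a variadic function to declare at least one
named parameter, and va_start must be passed the last named parameter; C23
(WG14 N2975) lifts both restrictions. A minimal hand-written C sketch of the
two forms (illustrative only, not literal CBE output; the function names here
are made up for this example):

    #include <stdarg.h>

    /* pre-C23: a named parameter is required and va_start must name it */
    static int sum_pre(int count, ...) {
        va_list ap;
        va_start(ap, count);
        int total = 0;
        for (int i = 0; i < count; i++)
            total += va_arg(ap, int);
        va_end(ap);
        return total;
    }

    /* C23: `int f(...)` is legal and va_start takes only the va_list */
    static int first_c23(...) {
        va_list ap;
        va_start(ap);
        int first = va_arg(ap, int);
        va_end(ap);
        return first;
    }

This is why the new `if (param_len > 0)` guard below suffices: when the
function has no named parameters, the trailing va_start argument is simply
omitted.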
--- src/codegen/c.zig | 9 ++++----- test/behavior/var_args.zig | 11 ++++++++++- 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 79e0bff237..1af14cb372 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6876,17 +6876,16 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete); - const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len; - if (param_len == 0) - return f.fail("CBE: C requires at least one runtime argument for varargs functions", .{}); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try writer.writeAll("va_start(*(va_list *)&"); try f.writeCValue(writer, local, .Other); - try writer.writeAll(", "); - try f.writeCValue(writer, .{ .arg = param_len - 1 }, .FunctionArgument); + if (param_len > 0) { + try writer.writeAll(", "); + try f.writeCValue(writer, .{ .arg = param_len - 1 }, .FunctionArgument); + } try writer.writeAll(");\n"); return local; } diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig index 6431ca9470..cdfbcc9188 100644 --- a/test/behavior/var_args.zig +++ b/test/behavior/var_args.zig @@ -111,6 +111,12 @@ test "simple variadic function" { return @cVaArg(&ap, c_int); } + fn compatible(_: c_int, ...) callconv(.C) c_int { + var ap = @cVaStart(); + defer @cVaEnd(&ap); + return @cVaArg(&ap, c_int); + } + fn add(count: c_int, ...) callconv(.C) c_int { var ap = @cVaStart(); defer @cVaEnd(&ap); @@ -123,10 +129,13 @@ test "simple variadic function" { } }; - if (builtin.zig_backend != .stage2_c) { // C doesn't support varargs without a preceding runtime arg. + if (builtin.zig_backend != .stage2_c) { + // pre C23 doesn't support varargs without a preceding runtime arg. try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0))); try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024))); } + try std.testing.expectEqual(@as(c_int, 0), S.compatible(undefined, @as(c_int, 0))); + try std.testing.expectEqual(@as(c_int, 1024), S.compatible(undefined, @as(c_int, 1024))); try std.testing.expectEqual(@as(c_int, 0), S.add(0)); try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1))); try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2))); -- cgit v1.2.3 From 1453a595aac86b0ca5017c084b5d36108ac414ae Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 24 Feb 2023 23:28:14 -0500 Subject: CBE: reuse locals with the same `CType` instead of `Type` Many `Type`s can correspond to the same `CType`, so this reduces the number of used locals by 27760 when compiling only-c. Also, disabled some tests that were only passing by accident and shouldn't really be considered working. --- src/codegen/c.zig | 136 ++++++++++++++++++++++++++++++----------------- src/codegen/c/type.zig | 135 ++++++++++++++++++++-------------------------- test/behavior/vector.zig | 2 + 3 files changed, 147 insertions(+), 126 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1af14cb372..e1fc715f8b 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -82,15 +82,20 @@ pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue); const LoopDepth = u16; const Local = struct { - ty: Type, - alignment: u32, + cty_idx: CType.Index, /// How many loops the last definition was nested in. 
loop_depth: LoopDepth, + alignas: CType.AlignAs, + + pub fn getType(local: Local) LocalType { + return .{ .cty_idx = local.cty_idx, .alignas = local.alignas }; + } }; const LocalIndex = u16; +const LocalType = struct { cty_idx: CType.Index, alignas: CType.AlignAs }; const LocalsList = std.ArrayListUnmanaged(LocalIndex); -const LocalsMap = std.ArrayHashMapUnmanaged(Type, LocalsList, Type.HashContext32, true); +const LocalsMap = std.AutoArrayHashMapUnmanaged(LocalType, LocalsList); const LocalsStack = std.ArrayListUnmanaged(LocalsMap); const ValueRenderLocation = enum { @@ -296,10 +301,6 @@ pub const Function = struct { /// Needed for memory used by the keys of free_locals_stack entries. arena: std.heap.ArenaAllocator, - fn tyHashCtx(f: Function) Type.HashContext32 { - return .{ .mod = f.object.dg.module }; - } - fn resolveInst(f: *Function, inst: Air.Inst.Ref) !CValue { const gop = try f.value_map.getOrPut(inst); if (gop.found_existing) return gop.value_ptr.*; @@ -339,10 +340,11 @@ pub const Function = struct { /// Skips the reuse logic. fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue { const gpa = f.object.dg.gpa; + const target = f.object.dg.module.getTarget(); try f.locals.append(gpa, .{ - .ty = ty, - .alignment = alignment, + .cty_idx = try f.typeToIndex(ty, .complete), .loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), }); return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } @@ -355,14 +357,15 @@ pub const Function = struct { /// Only allocates the local; does not print anything. fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue { - if (f.getFreeLocals().getPtrContext(ty, f.tyHashCtx())) |locals_list| { - for (locals_list.items, 0..) 
|local_index, i| { + const target = f.object.dg.module.getTarget(); + if (f.getFreeLocals().getPtr(.{ + .cty_idx = try f.typeToIndex(ty, .complete), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + })) |locals_list| { + if (locals_list.popOrNull()) |local_index| { const local = &f.locals.items[local_index]; - if (local.alignment >= alignment) { - local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1); - _ = locals_list.swapRemove(i); - return .{ .new_local = local_index }; - } + local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1); + return .{ .new_local = local_index }; } } @@ -1695,22 +1698,34 @@ pub const DeclGen = struct { qualifiers: CQualifiers, alignment: u32, kind: CType.Kind, + ) error{ OutOfMemory, AnalysisFail }!void { + const target = dg.module.getTarget(); + const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)); + try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas); + } + + fn renderCTypeAndName( + dg: *DeclGen, + w: anytype, + cty_idx: CType.Index, + name: CValue, + qualifiers: CQualifiers, + alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; const module = dg.module; - if (alignment != 0) switch (std.math.order(alignment, ty.abiAlignment(dg.module.getTarget()))) { - .lt => try w.print("zig_under_align({}) ", .{alignment}), + switch (std.math.order(alignas.@"align", alignas.abi)) { + .lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}), .eq => {}, - .gt => try w.print("zig_align({}) ", .{alignment}), - }; + .gt => try w.print("zig_align({}) ", .{alignas.getAlign()}), + } - const idx = try dg.typeToIndex(ty, kind); const trailing = - try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, qualifiers); + try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers); try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { @@ -2589,36 +2604,27 @@ pub fn genFunc(f: *Function) !void { if (value) continue; // static const local = f.locals.items[local_index]; log.debug("inserting local {d} into free_locals", .{local_index}); - const gop = try free_locals.getOrPutContext(gpa, local.ty, f.tyHashCtx()); + const gop = try free_locals.getOrPut(gpa, local.getType()); if (!gop.found_existing) gop.value_ptr.* = .{}; try gop.value_ptr.append(gpa, local_index); } const SortContext = struct { - target: std.Target, - keys: []const Type, + keys: []const LocalType, - pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { - const a_ty = ctx.keys[a_index]; - const b_ty = ctx.keys[b_index]; - return b_ty.abiAlignment(ctx.target) < a_ty.abiAlignment(ctx.target); + pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool { + const lhs_ty = ctx.keys[lhs_index]; + const rhs_ty = ctx.keys[rhs_index]; + return lhs_ty.alignas.getAlign() > rhs_ty.alignas.getAlign(); } }; - const target = o.dg.module.getTarget(); - free_locals.sort(SortContext{ .target = target, .keys = free_locals.keys() }); + free_locals.sort(SortContext{ .keys = free_locals.keys() }); const w = o.code_header.writer(); for (free_locals.values()) |list| { for (list.items) |local_index| { const local = f.locals.items[local_index]; - try o.dg.renderTypeAndName( - w, - local.ty, - .{ .local = 
local_index }, - .{}, - local.alignment, - .complete, - ); + try o.dg.renderCTypeAndName(w, local.cty_idx, .{ .local = local_index }, .{}, local.alignas); try w.writeAll(";\n "); } } @@ -4486,7 +4492,7 @@ fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue { const new_free_locals = f.getFreeLocals(); var it = new_free_locals.iterator(); while (it.next()) |entry| { - const gop = try old_free_locals.getOrPutContext(gpa, entry.key_ptr.*, f.tyHashCtx()); + const gop = try old_free_locals.getOrPut(gpa, entry.key_ptr.*); if (gop.found_existing) { try gop.value_ptr.appendSlice(gpa, entry.value_ptr.items); } else { @@ -4522,6 +4528,10 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { // that we can notice and use them in the else branch. Any new locals must // necessarily be free already after the then branch is complete. const pre_locals_len = @intCast(LocalIndex, f.locals.items.len); + // Remember how many allocs there were before entering the then branch so + // that we can notice and make sure not to use them in the else branch. + // Any new allocs must be removed from the free list. + const pre_allocs_len = @intCast(LocalIndex, f.allocs.count()); const pre_clone_depth = f.free_locals_clone_depth; f.free_locals_clone_depth = @intCast(LoopDepth, f.free_locals_stack.items.len); @@ -4552,7 +4562,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { try die(f, inst, Air.indexToRef(operand)); } - try noticeBranchFrees(f, pre_locals_len, inst); + try noticeBranchFrees(f, pre_locals_len, pre_allocs_len, inst); if (needs_else) { try genBody(f, else_body); @@ -4627,6 +4637,10 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { // we can notice and use them in subsequent branches. Any new locals must // necessarily be free already after the previous branch is complete. const pre_locals_len = @intCast(LocalIndex, f.locals.items.len); + // Remember how many allocs there were before entering each branch so that + // we can notice and make sure not to use them in subsequent branches. + // Any new allocs must be removed from the free list. 
+        const pre_allocs_len = @intCast(LocalIndex, f.allocs.count());
         const pre_clone_depth = f.free_locals_clone_depth;
         f.free_locals_clone_depth = @intCast(LoopDepth, f.free_locals_stack.items.len);
@@ -4647,7 +4661,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
             try genBody(f, case_body);
         }

-        try noticeBranchFrees(f, pre_locals_len, inst);
+        try noticeBranchFrees(f, pre_locals_len, pre_allocs_len, inst);
     } else {
         for (liveness.deaths[case_i]) |operand| {
             try die(f, inst, Air.indexToRef(operand));
@@ -7441,11 +7455,7 @@ fn freeLocal(f: *Function, inst: Air.Inst.Index, local_index: LocalIndex, ref_in
     const local = &f.locals.items[local_index];
     log.debug("%{d}: freeing t{d} (operand %{d})", .{ inst, local_index, ref_inst });
     if (local.loop_depth < f.free_locals_clone_depth) return;
-    const gop = try f.free_locals_stack.items[local.loop_depth].getOrPutContext(
-        gpa,
-        local.ty,
-        f.tyHashCtx(),
-    );
+    const gop = try f.free_locals_stack.items[local.loop_depth].getOrPut(gpa, local.getType());
     if (!gop.found_existing) gop.value_ptr.* = .{};
     if (std.debug.runtime_safety) {
         // If this trips, it means a local is being inserted into the
@@ -7504,14 +7514,40 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
     map.deinit(gpa);
 }

-fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void {
+fn noticeBranchFrees(
+    f: *Function,
+    pre_locals_len: LocalIndex,
+    pre_allocs_len: LocalIndex,
+    inst: Air.Inst.Index,
+) !void {
+    const free_locals = f.getFreeLocals();
+
     for (f.locals.items[pre_locals_len..], pre_locals_len..) |*local, local_i| {
         const local_index = @intCast(LocalIndex, local_i);
-        if (f.allocs.contains(local_index)) continue; // allocs are not freeable
+        if (f.allocs.contains(local_index)) {
+            if (std.debug.runtime_safety) {
+                // new allocs are no longer freeable, so make sure they aren't in the free list
+                if (free_locals.getPtr(local.getType())) |locals_list| {
+                    assert(mem.indexOfScalar(LocalIndex, locals_list.items, local_index) == null);
+                }
+            }
+            continue;
+        }

         // free more deeply nested locals from other branches at current depth
         assert(local.loop_depth >= f.free_locals_stack.items.len - 1);
         local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1);
         try freeLocal(f, inst, local_index, 0);
     }
+
+    for (f.allocs.keys()[pre_allocs_len..]) |local_i| {
+        const local_index = @intCast(LocalIndex, local_i);
+        const local = &f.locals.items[local_index];
+        // new allocs are no longer freeable, so remove them from the free list
+        if (free_locals.getPtr(local.getType())) |locals_list| {
+            if (mem.indexOfScalar(LocalIndex, locals_list.items, local_index)) |i| {
+                _ = locals_list.swapRemove(i);
+            }
+        }
+    }
 }
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index bd4b6d9a8d..1f1a220cd2 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -251,38 +251,6 @@ pub const CType = extern union {
             type: Index,
             alignas: AlignAs,
         };
-        pub const AlignAs = struct {
-            @"align": std.math.Log2Int(u32),
-            abi: std.math.Log2Int(u32),
-
-            pub fn init(alignment: u32, abi_alignment: u32) AlignAs {
-                assert(std.math.isPowerOfTwo(alignment));
-                assert(std.math.isPowerOfTwo(abi_alignment));
-                return .{
-                    .@"align" = std.math.log2_int(u32, alignment),
-                    .abi = std.math.log2_int(u32, abi_alignment),
-                };
-            }
-            pub fn abiAlign(ty: Type, target: Target) AlignAs {
-                const abi_align = ty.abiAlignment(target);
-                return init(abi_align, abi_align);
-            }
-            pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
-                return init(
-                    struct_ty.structFieldAlign(field_i, target),
-                    struct_ty.structFieldType(field_i).abiAlignment(target),
-                );
-            }
-            pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
-                const union_obj = union_ty.cast(Type.Payload.Union).?.data;
-                const union_payload_align = union_obj.abiAlignment(target, false);
-                return init(union_payload_align, union_payload_align);
-            }
-
-            pub fn getAlign(self: AlignAs) u32 {
-                return @as(u32, 1) << self.@"align";
-            }
-        };
     };

     pub const Unnamed = struct {
@@ -311,13 +279,57 @@ pub const CType = extern union {
         };
     };

+    pub const AlignAs = struct {
+        @"align": std.math.Log2Int(u32),
+        abi: std.math.Log2Int(u32),
+
+        pub fn init(alignment: u32, abi_alignment: u32) AlignAs {
+            const actual_align = if (alignment != 0) alignment else abi_alignment;
+            assert(std.math.isPowerOfTwo(actual_align));
+            assert(std.math.isPowerOfTwo(abi_alignment));
+            return .{
+                .@"align" = std.math.log2_int(u32, actual_align),
+                .abi = std.math.log2_int(u32, abi_alignment),
+            };
+        }
+        pub fn abiAlign(ty: Type, target: Target) AlignAs {
+            const abi_align = ty.abiAlignment(target);
+            return init(abi_align, abi_align);
+        }
+        pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
+            return init(
+                struct_ty.structFieldAlign(field_i, target),
+                struct_ty.structFieldType(field_i).abiAlignment(target),
+            );
+        }
+        pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
+            const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+            const union_payload_align = union_obj.abiAlignment(target, false);
+            return init(union_payload_align, union_payload_align);
+        }
+
+        pub fn getAlign(self: AlignAs) u32 {
+            return @as(u32, 1) << self.@"align";
+        }
+    };
+
     pub const Index = u32;

     pub const Store = struct {
         arena: std.heap.ArenaAllocator.State = .{},
         set: Set = .{},

         pub const Set = struct {
-            pub const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext32, true);
+            pub const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext, true);
+            const HashContext = struct {
+                store: *const Set,
+
+                pub fn hash(self: @This(), cty: CType) Map.Hash {
+                    return @truncate(Map.Hash, cty.hash(self.store.*));
+                }
+                pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool {
+                    return lhs.eql(rhs);
+                }
+            };

             map: Map = .{},
@@ -328,7 +340,7 @@ pub const CType = extern union {

             pub fn indexToHash(self: Set, index: Index) Map.Hash {
                 if (index < Tag.no_payload_count)
-                    return (HashContext32{ .store = &self }).hash(self.indexToCType(index));
+                    return (HashContext{ .store = &self }).hash(self.indexToCType(index));
                 return self.map.entries.items(.hash)[index - Tag.no_payload_count];
             }
@@ -905,7 +917,7 @@ pub const CType = extern union {
                 self.storage.anon.fields[0] = .{
                     .name = "array",
                     .type = array_idx,
-                    .alignas = Payload.Fields.AlignAs.abiAlign(ty, lookup.getTarget()),
+                    .alignas = AlignAs.abiAlign(ty, lookup.getTarget()),
                 };
                 self.initAnon(kind, fwd_idx, 1);
             } else self.init(switch (kind) {
@@ -1004,12 +1016,12 @@ pub const CType = extern union {
                 self.storage.anon.fields[0] = .{
                     .name = "ptr",
                     .type = ptr_idx,
-                    .alignas = Payload.Fields.AlignAs.abiAlign(ptr_ty, target),
+                    .alignas = AlignAs.abiAlign(ptr_ty, target),
                 };
                 self.storage.anon.fields[1] = .{
                     .name = "len",
                     .type = Tag.uintptr_t.toIndex(),
-                    .alignas = Payload.Fields.AlignAs.abiAlign(Type.usize, target),
+                    .alignas = AlignAs.abiAlign(Type.usize, target),
                 };
                 self.initAnon(kind, fwd_idx, 2);
             } else self.init(switch (kind) {
@@ -1125,7 +1137,7 @@ pub const CType = extern union {
                 self.storage.anon.fields[field_count] = .{
                     .name = "payload",
                     .type = payload_idx.?,
-                    .alignas = Payload.Fields.AlignAs.unionPayloadAlign(ty, target),
+                    .alignas = AlignAs.unionPayloadAlign(ty, target),
                 };
                 field_count += 1;
             }
@@ -1133,7 +1145,7 @@ pub const CType = extern union {
                 self.storage.anon.fields[field_count] = .{
                     .name = "tag",
                     .type = tag_idx.?,
-                    .alignas = Payload.Fields.AlignAs.abiAlign(tag_ty.?, target),
+                    .alignas = AlignAs.abiAlign(tag_ty.?, target),
                 };
                 field_count += 1;
             }
@@ -1158,11 +1170,7 @@ pub const CType = extern union {
                 const field_ty = ty.structFieldType(field_i);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;

-                const field_align = Payload.Fields.AlignAs.fieldAlign(
-                    ty,
-                    field_i,
-                    target,
-                );
+                const field_align = AlignAs.fieldAlign(ty, field_i, target);
                 if (field_align.@"align" < field_align.abi) {
                     is_packed = true;
                     if (!lookup.isMutable()) break;
@@ -1235,12 +1243,12 @@ pub const CType = extern union {
                 self.storage.anon.fields[0] = .{
                     .name = "payload",
                     .type = payload_idx,
-                    .alignas = Payload.Fields.AlignAs.abiAlign(payload_ty, target),
+                    .alignas = AlignAs.abiAlign(payload_ty, target),
                 };
                 self.storage.anon.fields[1] = .{
                     .name = "is_null",
                     .type = Tag.bool.toIndex(),
-                    .alignas = Payload.Fields.AlignAs.abiAlign(Type.bool, target),
+                    .alignas = AlignAs.abiAlign(Type.bool, target),
                 };
                 self.initAnon(kind, fwd_idx, 2);
             } else self.init(switch (kind) {
@@ -1273,12 +1281,12 @@ pub const CType = extern union {
                 self.storage.anon.fields[0] = .{
                     .name = "payload",
                     .type = payload_idx,
-                    .alignas = Payload.Fields.AlignAs.abiAlign(payload_ty, target),
+                    .alignas = AlignAs.abiAlign(payload_ty, target),
                 };
                 self.storage.anon.fields[1] = .{
                     .name = "error",
                     .type = error_idx,
-                    .alignas = Payload.Fields.AlignAs.abiAlign(error_ty, target),
+                    .alignas = AlignAs.abiAlign(error_ty, target),
                 };
                 self.initAnon(kind, fwd_idx, 2);
             } else self.init(switch (kind) {
@@ -1551,7 +1559,7 @@ pub const CType = extern union {
                     .complete, .parameter, .payload => .complete,
                     .global => .global,
                 }).?,
-                .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target),
+                .alignas = AlignAs.fieldAlign(ty, field_i, target),
             };
         }
@@ -1635,28 +1643,6 @@ pub const CType = extern union {
         }
     }

-    pub const HashContext64 = struct {
-        store: *const Store.Set,
-
-        pub fn hash(self: @This(), cty: CType) u64 {
-            return cty.hash(self.store.*);
-        }
-        pub fn eql(_: @This(), lhs: CType, rhs: CType) bool {
-            return lhs.eql(rhs);
-        }
-    };
-
-    pub const HashContext32 = struct {
-        store: *const Store.Set,
-
-        pub fn hash(self: @This(), cty: CType) u32 {
-            return @truncate(u32, cty.hash(self.store.*));
-        }
-        pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool {
-            return lhs.eql(rhs);
-        }
-    };
-
     pub const TypeAdapter64 = struct {
         kind: Kind,
         lookup: Convert.Lookup,
@@ -1719,7 +1705,7 @@ pub const CType = extern union {
                         else => unreachable,
                     },
                     mem.span(c_field.name),
-                ) or Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align" !=
+                ) or AlignAs.fieldAlign(ty, field_i, target).@"align" !=
                     c_field.alignas.@"align") return false;
             }
             return true;
@@ -1840,10 +1826,7 @@ pub const CType = extern union {
                     .Union => ty.unionFields().keys()[field_i],
                     else => unreachable,
                 });
-                autoHash(
-                    hasher,
-                    Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align",
-                );
+                autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align");
             }
         },
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 191c7bf7eb..50fef7f646 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -1247,6 +1247,7 @@ test "load packed vector element" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     var x: @Vector(2, u15) = .{ 1, 4 };
     try expect((&x[0]).* == 1);
@@ -1259,6 +1260,7 @@ test "store packed vector element" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     var v = @Vector(4, u1){ 1, 1, 1, 1 };
     try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);
-- 
cgit v1.2.3

From 477be90c0c18a473c5b0c84c0fbcc9ce41e47738 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 25 Feb 2023 00:18:30 -0500
Subject: CBE: replace locals list with a hash map

Replace `ArrayList` with `ArrayHashMap` since we want to be able to
remove by element.
---
 src/codegen/c.zig | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)

(limited to 'src/codegen')

diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index e1fc715f8b..cf428d4bd6 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -94,7 +94,7 @@ const Local = struct {

 const LocalIndex = u16;
 const LocalType = struct { cty_idx: CType.Index, alignas: CType.AlignAs };
-const LocalsList = std.ArrayListUnmanaged(LocalIndex);
+const LocalsList = std.AutoArrayHashMapUnmanaged(LocalIndex, void);
 const LocalsMap = std.AutoArrayHashMapUnmanaged(LocalType, LocalsList);
 const LocalsStack = std.ArrayListUnmanaged(LocalsMap);

@@ -362,10 +362,10 @@ pub const Function = struct {
         .cty_idx = try f.typeToIndex(ty, .complete),
         .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
     })) |locals_list| {
-        if (locals_list.popOrNull()) |local_index| {
-            const local = &f.locals.items[local_index];
+        if (locals_list.popOrNull()) |local_entry| {
+            const local = &f.locals.items[local_entry.key];
             local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1);
-            return .{ .new_local = local_index };
+            return .{ .new_local = local_entry.key };
         }
     }
@@ -2606,7 +2606,7 @@ pub fn genFunc(f: *Function) !void {
         log.debug("inserting local {d} into free_locals", .{local_index});
         const gop = try free_locals.getOrPut(gpa, local.getType());
         if (!gop.found_existing) gop.value_ptr.* = .{};
-        try gop.value_ptr.append(gpa, local_index);
+        try gop.value_ptr.putNoClobber(gpa, local_index, {});
     }

     const SortContext = struct {
@@ -2622,7 +2622,7 @@ pub fn genFunc(f: *Function) !void {

     const w = o.code_header.writer();
     for (free_locals.values()) |list| {
-        for (list.items) |local_index| {
+        for (list.keys()) |local_index| {
             const local = f.locals.items[local_index];
             try o.dg.renderCTypeAndName(w, local.cty_idx, .{ .local = local_index }, .{}, local.alignas);
             try w.writeAll(";\n ");
@@ -4494,11 +4494,11 @@ fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue {
     while (it.next()) |entry| {
         const gop = try old_free_locals.getOrPut(gpa, entry.key_ptr.*);
         if (gop.found_existing) {
-            try gop.value_ptr.appendSlice(gpa, entry.value_ptr.items);
-        } else {
-            gop.value_ptr.* = entry.value_ptr.*;
-            entry.value_ptr.* = .{};
-        }
+            try gop.value_ptr.ensureUnusedCapacity(gpa, entry.value_ptr.count());
+            for (entry.value_ptr.keys()) |local_index| {
+                gop.value_ptr.putAssumeCapacityNoClobber(local_index, {});
+            }
+        } else gop.value_ptr.* = entry.value_ptr.move();
     }
     deinitFreeLocalsMap(gpa, new_free_locals);
     new_free_locals.* = old_free_locals.move();
@@ -7458,14 +7458,13 @@ fn freeLocal(f: *Function, inst: Air.Inst.Index, local_index: LocalIndex, ref_in
     const gop = try f.free_locals_stack.items[local.loop_depth].getOrPut(gpa, local.getType());
     if (!gop.found_existing) gop.value_ptr.* = .{};
     if (std.debug.runtime_safety) {
-        // If this trips, it means a local is being inserted into the
-        // free_locals map while it already exists in the map, which is not
-        // allowed.
-        assert(mem.indexOfScalar(LocalIndex, gop.value_ptr.items, local_index) == null);
         // If this trips, an unfreeable allocation was attempted to be freed.
         assert(!f.allocs.contains(local_index));
     }
-    try gop.value_ptr.append(gpa, local_index);
+    // If this trips, it means a local is being inserted into the
+    // free_locals map while it already exists in the map, which is not
+    // allowed.
+    try gop.value_ptr.putNoClobber(gpa, local_index, {});
 }

 const BigTomb = struct {
@@ -7528,7 +7527,7 @@ fn noticeBranchFrees(
             if (std.debug.runtime_safety) {
                 // new allocs are no longer freeable, so make sure they aren't in the free list
                 if (free_locals.getPtr(local.getType())) |locals_list| {
-                    assert(mem.indexOfScalar(LocalIndex, locals_list.items, local_index) == null);
+                    assert(!locals_list.contains(local_index));
                 }
             }
             continue;
@@ -7544,10 +7543,6 @@ fn noticeBranchFrees(
         const local_index = @intCast(LocalIndex, local_i);
         const local = &f.locals.items[local_index];
         // new allocs are no longer freeable, so remove them from the free list
-        if (free_locals.getPtr(local.getType())) |locals_list| {
-            if (mem.indexOfScalar(LocalIndex, locals_list.items, local_index)) |i| {
-                _ = locals_list.swapRemove(i);
-            }
-        }
+        if (free_locals.getPtr(local.getType())) |locals_list| _ = locals_list.swapRemove(local_index);
     }
 }
-- 
cgit v1.2.3
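
The `LocalsList` change in the second patch is Zig's standard ordered-set idiom: an array
hash map with `void` values keeps its keys densely stored and iterable via `keys()`, while
membership tests and removal by key become hash lookups instead of the O(n)
`mem.indexOfScalar` scans the `ArrayListUnmanaged` version needed. Below is a minimal
standalone sketch of that idiom; it is illustrative only (the names and the `main` driver
are not from the patches), though it sticks to the same std APIs the diff itself calls:

    const std = @import("std");
    const assert = std.debug.assert;

    const LocalIndex = u16;
    // A void-valued array hash map acts as an ordered set of keys.
    const LocalsList = std.AutoArrayHashMapUnmanaged(LocalIndex, void);

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var free: LocalsList = .{};
        defer free.deinit(gpa);

        // putNoClobber asserts the key is absent, encoding the invariant
        // that a local may sit in free_locals at most once.
        try free.putNoClobber(gpa, 3, {});
        try free.putNoClobber(gpa, 7, {});

        // Membership is a hash lookup, not a linear scan.
        assert(free.contains(7));

        // Removal by key is O(1): the vacated slot is backfilled by the
        // last entry, so insertion order is not preserved afterwards.
        _ = free.swapRemove(7);
        assert(!free.contains(7));

        // popOrNull removes and returns the last stored entry, matching
        // the old ArrayList pop used to recycle a free local.
        const entry = free.popOrNull() orelse unreachable;
        assert(entry.key == 3);
    }

The trade-off is the one the commit accepts: `swapRemove` gives up stable order, which is
fine for a pool of interchangeable free locals.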