Diffstat (limited to 'lib')
33 files changed, 2278 insertions, 791 deletions
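The recurring change across these files: Compilation no longer reaches for std.fs.cwd() itself; init() and initDefault() now take the directory used for relative file lookups, and openFile/openDir/access calls go through comp.cwd. A minimal sketch of the updated call pattern, modeled on the test call sites in this diff — the `aro` module import and the main() harness are assumptions, not part of the commit:

const std = @import("std");
const aro = @import("aro");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // Callers now choose the directory for relative lookups explicitly;
    // the updated tests in this diff pass std.fs.cwd().
    var comp = aro.Compilation.init(gpa, std.fs.cwd());
    defer comp.deinit();

    _ = try comp.generateBuiltinMacros(.include_system_defines);
}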
diff --git a/lib/compiler/aro/aro.zig b/lib/compiler/aro/aro.zig index c39972f5c9..8e3da2aa97 100644 --- a/lib/compiler/aro/aro.zig +++ b/lib/compiler/aro/aro.zig @@ -23,6 +23,7 @@ pub const version_str = backend.version_str; pub const version = backend.version; test { + _ = @import("aro/annex_g.zig"); _ = @import("aro/Builtins.zig"); _ = @import("aro/char_info.zig"); _ = @import("aro/Compilation.zig"); diff --git a/lib/compiler/aro/aro/Attribute.zig b/lib/compiler/aro/aro/Attribute.zig index 4671028b8f..a5b78b8463 100644 --- a/lib/compiler/aro/aro/Attribute.zig +++ b/lib/compiler/aro/aro/Attribute.zig @@ -38,12 +38,64 @@ pub const Kind = enum { } }; +pub const Iterator = struct { + source: union(enum) { + ty: Type, + slice: []const Attribute, + }, + index: usize, + + pub fn initSlice(slice: ?[]const Attribute) Iterator { + return .{ .source = .{ .slice = slice orelse &.{} }, .index = 0 }; + } + + pub fn initType(ty: Type) Iterator { + return .{ .source = .{ .ty = ty }, .index = 0 }; + } + + /// returns the next attribute as well as its index within the slice or current type + /// The index can be used to determine when a nested type has been recursed into + pub fn next(self: *Iterator) ?struct { Attribute, usize } { + switch (self.source) { + .slice => |slice| { + if (self.index < slice.len) { + defer self.index += 1; + return .{ slice[self.index], self.index }; + } + }, + .ty => |ty| { + switch (ty.specifier) { + .typeof_type => { + self.* = .{ .source = .{ .ty = ty.data.sub_type.* }, .index = 0 }; + return self.next(); + }, + .typeof_expr => { + self.* = .{ .source = .{ .ty = ty.data.expr.ty }, .index = 0 }; + return self.next(); + }, + .attributed => { + if (self.index < ty.data.attributed.attributes.len) { + defer self.index += 1; + return .{ ty.data.attributed.attributes[self.index], self.index }; + } + self.* = .{ .source = .{ .ty = ty.data.attributed.base }, .index = 0 }; + return self.next(); + }, + else => {}, + } + }, + } + return null; + } +}; + pub const ArgumentType = enum { string, identifier, int, alignment, float, + complex_float, expression, nullptr_t, @@ -54,6 +106,7 @@ pub const ArgumentType = enum { .int, .alignment => "an integer constant", .nullptr_t => "nullptr", .float => "a floating point number", + .complex_float => "a complex floating point number", .expression => "an expression", }; } @@ -65,7 +118,7 @@ pub fn requiredArgCount(attr: Tag) u32 { inline else => |tag| { comptime var needed = 0; comptime { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; for (fields) |arg_field| { if (!mem.eql(u8, arg_field.name, "__name_tok") and @typeInfo(arg_field.type) != .optional) needed += 1; } @@ -81,7 +134,7 @@ pub fn maxArgCount(attr: Tag) u32 { inline else => |tag| { comptime var max = 0; comptime { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; for (fields) |arg_field| { if (!mem.eql(u8, arg_field.name, "__name_tok")) max += 1; } @@ -106,7 +159,7 @@ pub const Formatting = struct { switch (attr) { .calling_convention => unreachable, inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); @@ -123,14 +176,13 @@ pub const Formatting = struct { switch (attr) { .calling_convention 
=> unreachable, inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; const enum_fields = @typeInfo(Unwrapped).@"enum".fields; - @setEvalBranchQuota(3000); const quote = comptime quoteChar(@enumFromInt(@intFromEnum(tag))); comptime var values: []const u8 = quote ++ enum_fields[0].name ++ quote; inline for (enum_fields[1..]) |enum_field| { @@ -148,7 +200,7 @@ pub fn wantsIdentEnum(attr: Tag) bool { switch (attr) { .calling_convention => return false, inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) return false; const Unwrapped = UnwrapOptional(fields[0].type); @@ -162,7 +214,7 @@ pub fn wantsIdentEnum(attr: Tag) bool { pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagnostics.Message { switch (attr) { inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; @@ -181,7 +233,7 @@ pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagn pub fn wantsAlignment(attr: Tag, idx: usize) bool { switch (attr) { inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) return false; return switch (idx) { @@ -195,7 +247,7 @@ pub fn wantsAlignment(attr: Tag, idx: usize) bool { pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, p: *Parser) !?Diagnostics.Message { switch (attr) { inline else => |tag| { - const arg_fields = std.meta.fields(@field(attributes, @tagName(tag))); + const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (arg_fields.len == 0) unreachable; switch (arg_idx) { @@ -249,8 +301,7 @@ fn diagnoseField( }, .bytes => |bytes| { if (Wanted == Value) { - std.debug.assert(node.tag == .string_literal_expr); - if (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar)) { + if (node.tag != .string_literal_expr or (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar))) { return .{ .tag = .attribute_requires_string, .extra = .{ .str = decl.name }, @@ -264,7 +315,6 @@ fn diagnoseField( @field(@field(arguments, decl.name), field.name) = enum_val; return null; } else { - @setEvalBranchQuota(3000); return .{ .tag = .unknown_attr_enum, .extra = .{ .attr_enum = .{ .tag = std.meta.stringToEnum(Tag, decl.name).? 
} }, @@ -278,8 +328,19 @@ fn diagnoseField( .int => .int, .bytes => .string, .float => .float, + .complex => .complex_float, .null => .nullptr_t, - else => unreachable, + .int_ty, + .float_ty, + .complex_ty, + .ptr_ty, + .noreturn_ty, + .void_ty, + .func_ty, + .array_ty, + .vector_ty, + .record_ty, + => unreachable, }); } @@ -309,7 +370,7 @@ pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Resu .tag = .attribute_too_many_args, .extra = .{ .attr_arg_count = .{ .attribute = attr, .expected = max_arg_count } }, }; - const arg_fields = std.meta.fields(@field(attributes, decl.name)); + const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields; switch (arg_idx) { inline 0...arg_fields.len - 1 => |arg_i| { return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, node, p); @@ -645,7 +706,7 @@ pub const Arguments = blk: { var union_fields: [decls.len]ZigType.UnionField = undefined; for (decls, &union_fields) |decl, *field| { field.* = .{ - .name = decl.name ++ "", + .name = decl.name, .type = @field(attributes, decl.name), .alignment = 0, }; @@ -730,7 +791,6 @@ pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; - if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; var common = false; var nocommon = false; for (attrs, toks) |attr, tok| switch (attr.tag) { @@ -772,15 +832,10 @@ pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: .copy, .tls_model, .visibility, - => std.debug.panic("apply variable attribute {s}", .{@tagName(attr.tag)}), + => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .variables } }), else => try ignoredAttrErr(p, tok, attr.tag, "variables"), }; - const existing = ty.getAttributes(); - if (existing.len == 0 and p.attr_application_buf.items.len == 0) return base_ty; - if (existing.len == 0) return base_ty.withAttributes(p.arena, p.attr_application_buf.items); - - const attributed_type = try Type.Attributed.create(p.arena, base_ty, existing, p.attr_application_buf.items); - return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type } }; + return base_ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) ![]const Attribute { @@ -789,7 +844,7 @@ pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) p.attr_application_buf.items.len = 0; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off - .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, + .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .warn_unused_result, .nodiscard, => try p.attr_application_buf.append(p.gpa, attr), // zig fmt: on .vector_size => try attr.applyVectorSize(p, tok, field_ty), @@ -805,7 +860,6 @@ pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Di const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; - if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, @@ -823,22 +877,10 @@ pub fn applyTypeAttributes(p: *Parser, ty: 
Type, attr_buf_start: usize, tag: ?Di .copy, .scalar_storage_order, .nonstring, - => std.debug.panic("apply type attribute {s}", .{@tagName(attr.tag)}), + => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .types } }), else => try ignoredAttrErr(p, tok, attr.tag, "types"), }; - - const existing = ty.getAttributes(); - // TODO: the alignment annotation on a type should override - // the decl it refers to. This might not be true for others. Maybe bug. - - // if there are annotations on this type def use those. - if (p.attr_application_buf.items.len > 0) { - return try base_ty.withAttributes(p.arena, p.attr_application_buf.items); - } else if (existing.len > 0) { - // else use the ones on the typedef decl we were refering to. - return try base_ty.withAttributes(p.arena, existing); - } - return base_ty; + return base_ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { @@ -846,7 +888,6 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; - if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; var hot = false; var cold = false; var @"noinline" = false; @@ -896,6 +937,13 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?), }, }, + .malloc => { + if (base_ty.returnType().isPtr()) { + try p.attr_application_buf.append(p.gpa, attr); + } else { + try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers"); + } + }, .access, .alloc_align, .alloc_size, @@ -908,7 +956,6 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ .ifunc, .interrupt, .interrupt_handler, - .malloc, .no_address_safety_analysis, .no_icf, .no_instrument_function, @@ -937,7 +984,7 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ .visibility, .weakref, .zero_call_used_regs, - => std.debug.panic("apply type attribute {s}", .{@tagName(attr.tag)}), + => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .functions } }), else => try ignoredAttrErr(p, tok, attr.tag, "functions"), }; return ty.withAttributes(p.arena, p.attr_application_buf.items); @@ -1043,11 +1090,14 @@ fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, ty: Type) } fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !void { - if (!(ty.isInt() or ty.isFloat()) or !ty.isReal()) { - const orig_ty = try p.typeStr(ty.*); - ty.* = Type.invalid; - return p.errStr(.invalid_vec_elem_ty, tok, orig_ty); + const base = ty.base(); + const is_enum = ty.is(.@"enum"); + if (!(ty.isInt() or ty.isFloat()) or !ty.isReal() or (is_enum and p.comp.langopts.emulate == .gcc)) { + try p.errStr(.invalid_vec_elem_ty, tok, try p.typeStr(ty.*)); + return error.ParsingFailed; } + if (is_enum) return; + const vec_bytes = attr.args.vector_size.bytes; const ty_size = ty.sizeof(p.comp).?; if (vec_bytes % ty_size != 0) { @@ -1057,7 +1107,7 @@ fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !voi const arr_ty = try p.arena.create(Type.Array); arr_ty.* = .{ .elem = ty.*, .len = vec_size }; - ty.* = Type{ + base.* = .{ .specifier = .vector, .data = .{ .array = arr_ty }, }; diff --git a/lib/compiler/aro/aro/Attribute/names.zig 
b/lib/compiler/aro/aro/Attribute/names.zig index d315389066..c0732b6118 100644 --- a/lib/compiler/aro/aro/Attribute/names.zig +++ b/lib/compiler/aro/aro/Attribute/names.zig @@ -69,6 +69,7 @@ pub const longest_name = 30; /// If found, returns the index of the node within the `dafsa` array. /// Otherwise, returns `null`. pub fn findInList(first_child_index: u16, char: u8) ?u16 { + @setEvalBranchQuota(206); var index = first_child_index; while (true) { if (dafsa[index].char == char) return index; @@ -787,7 +788,7 @@ const dafsa = [_]Node{ .{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 215 }, }; pub const data = blk: { - @setEvalBranchQuota(103); + @setEvalBranchQuota(721); break :blk [_]@This(){ // access .{ .tag = @enumFromInt(0), .properties = .{ .tag = .access, .gnu = true } }, diff --git a/lib/compiler/aro/aro/Builtins.zig b/lib/compiler/aro/aro/Builtins.zig index be24a3ff60..fa92de328a 100644 --- a/lib/compiler/aro/aro/Builtins.zig +++ b/lib/compiler/aro/aro/Builtins.zig @@ -350,7 +350,7 @@ test Iterator { } test "All builtins" { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.generateBuiltinMacros(.include_system_defines); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -373,7 +373,7 @@ test "All builtins" { test "Allocation failures" { const Test = struct { fn testOne(allocator: std.mem.Allocator) !void { - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.generateBuiltinMacros(.include_system_defines); var arena = std.heap.ArenaAllocator.init(comp.gpa); diff --git a/lib/compiler/aro/aro/Builtins/Builtin.zig b/lib/compiler/aro/aro/Builtins/Builtin.zig index c5cf98608b..6e5217b4da 100644 --- a/lib/compiler/aro/aro/Builtins/Builtin.zig +++ b/lib/compiler/aro/aro/Builtins/Builtin.zig @@ -71,6 +71,7 @@ pub const longest_name = 43; /// If found, returns the index of the node within the `dafsa` array. /// Otherwise, returns `null`. pub fn findInList(first_child_index: u16, char: u8) ?u16 { + @setEvalBranchQuota(7972); var index = first_child_index; while (true) { if (dafsa[index].char == char) return index; @@ -5165,7 +5166,7 @@ const dafsa = [_]Node{ .{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 4913 }, }; pub const data = blk: { - @setEvalBranchQuota(30_000); + @setEvalBranchQuota(27902); break :blk [_]@This(){ // _Block_object_assign .{ .tag = @enumFromInt(0), .properties = .{ .param_str = "vv*vC*iC", .header = .blocks, .attributes = .{ .lib_function_without_prefix = true } } }, diff --git a/lib/compiler/aro/aro/Builtins/eval.zig b/lib/compiler/aro/aro/Builtins/eval.zig new file mode 100644 index 0000000000..008da152d4 --- /dev/null +++ b/lib/compiler/aro/aro/Builtins/eval.zig @@ -0,0 +1,86 @@ +const std = @import("std"); +const backend = @import("../../backend.zig"); +const Interner = backend.Interner; +const Builtins = @import("../Builtins.zig"); +const Builtin = Builtins.Builtin; +const Parser = @import("../Parser.zig"); +const Tree = @import("../Tree.zig"); +const NodeIndex = Tree.NodeIndex; +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); + +fn makeNan(comptime T: type, str: []const u8) T { + const UnsignedSameSize = std.meta.Int(.unsigned, @bitSizeOf(T)); + const parsed = std.fmt.parseUnsigned(UnsignedSameSize, str[0 .. 
str.len - 1], 0) catch 0; + const bits: switch (T) { + f32 => u23, + f64 => u52, + f80 => u63, + f128 => u112, + else => @compileError("Invalid type for makeNan"), + } = @truncate(parsed); + return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T)))); +} + +pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value { + const builtin = Builtin.fromTag(tag); + if (!builtin.properties.attributes.const_evaluable) return .{}; + + switch (tag) { + Builtin.tagFromName("__builtin_inff").?, + Builtin.tagFromName("__builtin_inf").?, + Builtin.tagFromName("__builtin_infl").?, + => { + const ty: Type = switch (tag) { + Builtin.tagFromName("__builtin_inff").? => .{ .specifier = .float }, + Builtin.tagFromName("__builtin_inf").? => .{ .specifier = .double }, + Builtin.tagFromName("__builtin_infl").? => .{ .specifier = .long_double }, + else => unreachable, + }; + const f: Interner.Key.Float = switch (ty.bitSizeof(p.comp).?) { + 32 => .{ .f32 = std.math.inf(f32) }, + 64 => .{ .f64 = std.math.inf(f64) }, + 80 => .{ .f80 = std.math.inf(f80) }, + 128 => .{ .f128 = std.math.inf(f128) }, + else => unreachable, + }; + return Value.intern(p.comp, .{ .float = f }); + }, + Builtin.tagFromName("__builtin_isinf").? => blk: { + if (args.len == 0) break :blk; + const val = p.value_map.get(args[0]) orelse break :blk; + return Value.fromBool(val.isInf(p.comp)); + }, + Builtin.tagFromName("__builtin_isinf_sign").? => blk: { + if (args.len == 0) break :blk; + const val = p.value_map.get(args[0]) orelse break :blk; + switch (val.isInfSign(p.comp)) { + .unknown => {}, + .finite => return Value.zero, + .positive => return Value.one, + .negative => return Value.int(@as(i64, -1), p.comp), + } + }, + Builtin.tagFromName("__builtin_isnan").? => blk: { + if (args.len == 0) break :blk; + const val = p.value_map.get(args[0]) orelse break :blk; + return Value.fromBool(val.isNan(p.comp)); + }, + Builtin.tagFromName("__builtin_nan").? => blk: { + if (args.len == 0) break :blk; + const val = p.getDecayedStringLiteral(args[0]) orelse break :blk; + const bytes = p.comp.interner.get(val.ref()).bytes; + + const f: Interner.Key.Float = switch ((Type{ .specifier = .double }).bitSizeof(p.comp).?) { + 32 => .{ .f32 = makeNan(f32, bytes) }, + 64 => .{ .f64 = makeNan(f64, bytes) }, + 80 => .{ .f80 = makeNan(f80, bytes) }, + 128 => .{ .f128 = makeNan(f128, bytes) }, + else => unreachable, + }; + return Value.intern(p.comp, .{ .float = f }); + }, + else => {}, + } + return .{}; +} diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index f04df5001e..6093bdc509 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -127,22 +127,27 @@ types: struct { } = .{}, string_interner: StrInt = .{}, interner: Interner = .{}, +/// If this is not null, the directory containing the specified Source will be searched for includes +/// Used by MS extensions which allow searching for includes relative to the directory of the main source file. ms_cwd_source_id: ?Source.Id = null, +cwd: std.fs.Dir, -pub fn init(gpa: Allocator) Compilation { +pub fn init(gpa: Allocator, cwd: std.fs.Dir) Compilation { return .{ .gpa = gpa, .diagnostics = Diagnostics.init(gpa), + .cwd = cwd, }; } /// Initialize Compilation with default environment, /// pragma handlers and emulation mode set to target. 
-pub fn initDefault(gpa: Allocator) !Compilation {
+pub fn initDefault(gpa: Allocator, cwd: std.fs.Dir) !Compilation {
     var comp: Compilation = .{
         .gpa = gpa,
         .environment = try Environment.loadAll(gpa),
         .diagnostics = Diagnostics.init(gpa),
+        .cwd = cwd,
     };
     errdefer comp.deinit();
     try comp.addDefaultPragmaHandlers();
@@ -534,7 +539,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
     if (system_defines_mode == .include_system_defines) {
         try buf.appendSlice(
             \\#define __VERSION__ "Aro
-        ++ @import("../backend.zig").version_str ++ "\"\n" ++
+        ++ " " ++ @import("../backend.zig").version_str ++ "\"\n" ++
             \\#define __Aro__
             \\
         );
@@ -550,6 +555,9 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
         \\#define __STDC_NO_VLA__ 1
         \\#define __STDC_UTF_16__ 1
         \\#define __STDC_UTF_32__ 1
+        \\#define __STDC_EMBED_NOT_FOUND__ 0
+        \\#define __STDC_EMBED_FOUND__ 1
+        \\#define __STDC_EMBED_EMPTY__ 2
         \\
     );
     if (comp.langopts.standard.StdCVersionMacro()) |stdc_version| {
@@ -719,8 +727,13 @@ fn generateBuiltinTypes(comp: *Compilation) !void {
     try comp.generateNsConstantStringType();
 }
+pub fn float80Type(comp: *const Compilation) ?Type {
+    if (comp.langopts.emulate != .gcc) return null;
+    return target_util.float80Type(comp.target);
+}
+
 /// Smallest integer type with at least N bits
-fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type {
+pub fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type {
     if (bits == 64 and (comp.target.isDarwin() or comp.target.isWasm())) {
         // WebAssembly and Darwin use `long long` for `int_least64_t` and `int_fast64_t`.
         return .{ .specifier = if (signedness == .signed) .long_long else .ulong_long };
     }
@@ -903,7 +916,7 @@ fn generateNsConstantStringType(comp: *Compilation) !void {
     comp.types.ns_constant_string.fields[2] = .{ .name = try StrInt.intern(comp, "str"), .ty = const_char_ptr };
     comp.types.ns_constant_string.fields[3] = .{ .name = try StrInt.intern(comp, "length"), .ty = .{ .specifier = .long } };
     comp.types.ns_constant_string.ty = .{ .specifier = .@"struct", .data = .{ .record = &comp.types.ns_constant_string.record } };
-    record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null);
+    record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null) catch unreachable;
 }

 fn generateVaListType(comp: *Compilation) !Type {
@@ -911,12 +924,12 @@ fn generateVaListType(comp: *Compilation) !Type {
     const kind: Kind = switch (comp.target.cpu.arch) {
         .aarch64 => switch (comp.target.os.tag) {
             .windows => @as(Kind, .char_ptr),
-            .ios, .macos, .tvos, .watchos, .visionos => .char_ptr,
+            .ios, .macos, .tvos, .watchos => .char_ptr,
             else => .aarch64_va_list,
         },
         .sparc, .wasm32, .wasm64, .bpfel, .bpfeb, .riscv32, .riscv64, .avr, .spirv32, .spirv64 => .void_ptr,
         .powerpc => switch (comp.target.os.tag) {
-            .ios, .macos, .tvos, .watchos, .visionos, .aix => @as(Kind, .char_ptr),
+            .ios, .macos, .tvos, .watchos, .aix => @as(Kind, .char_ptr),
             else => return Type{ .specifier = .void }, // unknown
         },
         .x86, .msp430 => .char_ptr,
@@ -951,7 +964,7 @@ fn generateVaListType(comp: *Compilation) !Type {
             record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "__gr_offs"), .ty = .{ .specifier = .int } };
             record_ty.fields[4] = .{ .name = try StrInt.intern(comp, "__vr_offs"), .ty = .{ .specifier = .int } };
             ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } };
-            record_layout.compute(record_ty, ty, comp, null);
+            record_layout.compute(record_ty, ty, comp, null) catch unreachable;
         },
         .x86_64_va_list => {
             const record_ty = try arena.create(Type.Record);
@@ -969,7 +982,7 @@ fn generateVaListType(comp: *Compilation) !Type {
             record_ty.fields[2] = .{ .name = try StrInt.intern(comp, "overflow_arg_area"), .ty = void_ptr };
             record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "reg_save_area"), .ty = void_ptr };
             ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } };
-            record_layout.compute(record_ty, ty, comp, null);
+            record_layout.compute(record_ty, ty, comp, null) catch unreachable;
         },
     }
     if (kind == .char_ptr or kind == .void_ptr) {
@@ -988,13 +1001,28 @@ fn generateVaListType(comp: *Compilation) !Type {
 fn generateIntMax(comp: *const Compilation, w: anytype, name: []const u8, ty: Type) !void {
     const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
     const unsigned = ty.isUnsignedInt(comp);
-    const max = if (bit_count == 128)
-        @as(u128, if (unsigned) std.math.maxInt(u128) else std.math.maxInt(u128))
-    else
-        ty.maxInt(comp);
+    const max: u128 = switch (bit_count) {
+        8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8),
+        16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16),
+        32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32),
+        64 => if (unsigned) std.math.maxInt(u64) else std.math.maxInt(i64),
+        128 => if (unsigned) std.math.maxInt(u128) else std.math.maxInt(i128),
+        else => unreachable,
+    };
     try w.print("#define __{s}_MAX__ {d}{s}\n", .{ name, max, ty.intValueSuffix(comp) });
 }
+/// Largest value that can be stored in wchar_t
+pub fn wcharMax(comp: *const Compilation) u32 {
+    const unsigned = comp.types.wchar.isUnsignedInt(comp);
+    return switch (comp.types.wchar.bitSizeof(comp).?) {
+        8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8),
+        16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16),
+        32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32),
+        else => unreachable,
+    };
+}
+
 fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Type.Specifier) !void {
     var ty = Type{ .specifier = specifier };
     const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
@@ -1039,6 +1067,12 @@ pub fn nextLargestIntSameSign(comp: *const Compilation, ty: Type) ?Type {
     return null;
 }
+/// Maximum size of an array, in bytes
+pub fn maxArrayBytes(comp: *const Compilation) u64 {
+    const max_bits = @min(61, comp.target.ptrBitWidth());
+    return (@as(u64, 1) << @truncate(max_bits)) - 1;
+}
+
 /// If `enum E { ... }` syntax has a fixed underlying integer type regardless of the presence of
 /// __attribute__((packed)) or the range of values of the corresponding enumerator constants,
 /// specify it here.
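The new maxArrayBytes() helper centralizes the array-size cap that Parser.directDeclarator() previously computed inline (see the Parser.zig hunks later in this diff). A standalone sketch of the same arithmetic, with ptr_bit_width standing in for comp.target.ptrBitWidth() — the test values are illustrative, not from the commit:

const std = @import("std");

fn maxArrayBytes(ptr_bit_width: u16) u64 {
    // Cap at 61 bits so the byte count always fits comfortably in a u64,
    // exactly as in Compilation.maxArrayBytes() above.
    const max_bits = @min(61, ptr_bit_width);
    return (@as(u64, 1) << @truncate(max_bits)) - 1;
}

test "array size cap" {
    // 32-bit target: limited by the pointer width.
    try std.testing.expectEqual((@as(u64, 1) << 32) - 1, maxArrayBytes(32));
    // 64-bit target: capped at 2^61 - 1 bytes.
    try std.testing.expectEqual((@as(u64, 1) << 61) - 1, maxArrayBytes(64));
}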
@@ -1060,7 +1094,7 @@ pub fn getCharSignedness(comp: *const Compilation) std.builtin.Signedness { pub fn addBuiltinIncludeDir(comp: *Compilation, aro_dir: []const u8) !void { var search_path = aro_dir; while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) { - var base_dir = std.fs.cwd().openDir(dirname, .{}) catch continue; + var base_dir = comp.cwd.openDir(dirname, .{}) catch continue; defer base_dir.close(); base_dir.access("include/stddef.h", .{}) catch continue; @@ -1266,7 +1300,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin return error.FileNotFound; } - const file = try std.fs.cwd().openFile(path, .{}); + const file = try comp.cwd.openFile(path, .{}); defer file.close(); const contents = file.readToEndAlloc(comp.gpa, std.math.maxInt(u32)) catch |err| switch (err) { @@ -1349,10 +1383,9 @@ pub fn hasInclude( return false; } - const cwd = std.fs.cwd(); if (std.fs.path.isAbsolute(filename)) { if (which == .next) return false; - return !std.meta.isError(cwd.access(filename, .{})); + return !std.meta.isError(comp.cwd.access(filename, .{})); } const cwd_source_id = switch (include_type) { @@ -1372,7 +1405,7 @@ pub fn hasInclude( while (try it.nextWithFile(filename, sf_allocator)) |found| { defer sf_allocator.free(found.path); - if (!std.meta.isError(cwd.access(found.path, .{}))) return true; + if (!std.meta.isError(comp.cwd.access(found.path, .{}))) return true; } return false; } @@ -1392,7 +1425,7 @@ fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u return error.FileNotFound; } - const file = try std.fs.cwd().openFile(path, .{}); + const file = try comp.cwd.openFile(path, .{}); defer file.close(); var buf = std.ArrayList(u8).init(comp.gpa); @@ -1571,6 +1604,17 @@ pub fn hasBuiltinFunction(comp: *const Compilation, builtin: Builtin) bool { } } +pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 { + var tmp_tokenizer = Tokenizer{ + .buf = comp.getSource(loc.id).buf, + .langopts = comp.langopts, + .index = loc.byte_offset, + .source = .generated, + }; + const tok = tmp_tokenizer.next(); + return tmp_tokenizer.buf[tok.start..tok.end]; +} + pub const CharUnitSize = enum(u32) { @"1" = 1, @"2" = 2, @@ -1590,7 +1634,7 @@ pub const addDiagnostic = Diagnostics.add; test "addSourceFromReader" { const Test = struct { fn addSourceFromReader(str: []const u8, expected: []const u8, warning_count: u32, splices: []const u32) !void { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var buf_reader = std.io.fixedBufferStream(str); @@ -1602,7 +1646,7 @@ test "addSourceFromReader" { } fn withAllocationFailures(allocator: std.mem.Allocator) !void { - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n"); @@ -1644,7 +1688,7 @@ test "addSourceFromReader - exhaustive check for carriage return elimination" { const alen = alphabet.len; var buf: [alphabet.len]u8 = [1]u8{alphabet[0]} ** alen; - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var source_count: u32 = 0; @@ -1672,7 +1716,7 @@ test "ignore BOM at beginning of file" { const Test = struct { fn run(buf: []const u8) !void { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, 
std.fs.cwd()); defer comp.deinit(); var buf_reader = std.io.fixedBufferStream(buf); diff --git a/lib/compiler/aro/aro/Diagnostics.zig b/lib/compiler/aro/aro/Diagnostics.zig index 8f80e4393d..3039a45ef8 100644 --- a/lib/compiler/aro/aro/Diagnostics.zig +++ b/lib/compiler/aro/aro/Diagnostics.zig @@ -47,6 +47,10 @@ pub const Message = struct { tag: Attribute.Tag, specifier: enum { @"struct", @"union", @"enum" }, }, + attribute_todo: struct { + tag: Attribute.Tag, + kind: enum { variables, fields, types, functions }, + }, builtin_with_header: struct { builtin: Builtin.Tag, header: Header, @@ -210,6 +214,9 @@ pub const Options = struct { normalized: Kind = .default, @"shift-count-negative": Kind = .default, @"shift-count-overflow": Kind = .default, + @"constant-conversion": Kind = .default, + @"sign-conversion": Kind = .default, + nonnull: Kind = .default, }; const Diagnostics = @This(); @@ -222,14 +229,14 @@ errors: u32 = 0, macro_backtrace_limit: u32 = 6, pub fn warningExists(name: []const u8) bool { - inline for (std.meta.fields(Options)) |f| { + inline for (@typeInfo(Options).@"struct".fields) |f| { if (mem.eql(u8, f.name, name)) return true; } return false; } pub fn set(d: *Diagnostics, name: []const u8, to: Kind) !void { - inline for (std.meta.fields(Options)) |f| { + inline for (@typeInfo(Options).@"struct".fields) |f| { if (mem.eql(u8, f.name, name)) { @field(d.options, f.name) = to; return; @@ -422,6 +429,10 @@ pub fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void { @tagName(msg.extra.ignored_record_attr.tag), @tagName(msg.extra.ignored_record_attr.specifier), }), + .attribute_todo => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ + @tagName(msg.extra.attribute_todo.tag), + @tagName(msg.extra.attribute_todo.kind), + }), .builtin_with_header => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ @tagName(msg.extra.builtin_with_header.header), Builtin.nameFromTag(msg.extra.builtin_with_header.builtin).span(), diff --git a/lib/compiler/aro/aro/Diagnostics/messages.zig b/lib/compiler/aro/aro/Diagnostics/messages.zig index acc5fd562c..c56641a461 100644 --- a/lib/compiler/aro/aro/Diagnostics/messages.zig +++ b/lib/compiler/aro/aro/Diagnostics/messages.zig @@ -107,6 +107,9 @@ pub const Tag = enum { multiple_default, previous_case, expected_arguments, + callee_with_static_array, + array_argument_too_small, + non_null_argument, expected_arguments_old, expected_at_least_arguments, invalid_static_star, @@ -214,6 +217,7 @@ pub const Tag = enum { pre_c23_compat, unbound_vla, array_too_large, + record_too_large, incompatible_ptr_init, incompatible_ptr_init_sign, incompatible_ptr_assign, @@ -349,6 +353,8 @@ pub const Tag = enum { non_standard_escape_char, invalid_pp_stringify_escape, vla, + int_value_changed, + sign_conversion, float_overflow_conversion, float_out_of_range, float_zero_conversion, @@ -425,7 +431,8 @@ pub const Tag = enum { bit_int, unsigned_bit_int_too_small, signed_bit_int_too_small, - bit_int_too_big, + unsigned_bit_int_too_big, + signed_bit_int_too_big, keyword_macro, ptr_arithmetic_incomplete, callconv_not_supported, @@ -509,6 +516,9 @@ pub const Tag = enum { complex_conj, overflow_builtin_requires_int, overflow_result_requires_ptr, + attribute_todo, + invalid_type_underlying_enum, + auto_type_self_initialized, pub fn property(tag: Tag) Properties { return named_data[@intFromEnum(tag)]; @@ -613,6 +623,9 @@ pub const Tag = enum { .{ .msg = "multiple default cases in the same switch", .kind = .@"error" }, .{ .msg = "previous case defined here", .kind = .note }, .{ .msg = 
expected_arguments, .extra = .arguments, .kind = .@"error" }, + .{ .msg = "callee declares array parameter as static here", .kind = .note }, + .{ .msg = "array argument is too small; contains {d} elements, callee requires at least {d}", .extra = .arguments, .kind = .warning, .opt = W("array-bounds") }, + .{ .msg = "null passed to a callee that requires a non-null argument", .kind = .warning, .opt = W("nonnull") }, .{ .msg = expected_arguments, .extra = .arguments, .kind = .warning }, .{ .msg = "expected at least {d} argument(s) got {d}", .extra = .arguments, .kind = .warning }, .{ .msg = "'static' may not be used with an unspecified variable length array size", .kind = .@"error" }, @@ -720,6 +733,7 @@ pub const Tag = enum { .{ .msg = "{s} is incompatible with C standards before C23", .extra = .str, .kind = .off, .suppress_unless_version = .c23, .opt = W("pre-c23-compat") }, .{ .msg = "variable length array must be bound in function definition", .kind = .@"error" }, .{ .msg = "array is too large", .kind = .@"error" }, + .{ .msg = "type '{s}' is too large", .kind = .@"error", .extra = .str }, .{ .msg = "incompatible pointer types initializing {s}", .extra = .str, .opt = W("incompatible-pointer-types"), .kind = .warning }, .{ .msg = "incompatible pointer types initializing {s}" ++ pointer_sign_message, .extra = .str, .opt = W("pointer-sign"), .kind = .warning }, .{ .msg = "incompatible pointer types assigning to {s}", .extra = .str, .opt = W("incompatible-pointer-types"), .kind = .warning }, @@ -855,6 +869,8 @@ pub const Tag = enum { .{ .msg = "use of non-standard escape character '\\{s}'", .kind = .off, .opt = W("pedantic"), .extra = .invalid_escape }, .{ .msg = "invalid string literal, ignoring final '\\'", .kind = .warning }, .{ .msg = "variable length array used", .kind = .off, .opt = W("vla") }, + .{ .msg = "implicit conversion from {s}", .extra = .str, .kind = .warning, .opt = W("constant-conversion") }, + .{ .msg = "implicit conversion changes signedness: {s}", .extra = .str, .kind = .off, .opt = W("sign-conversion") }, .{ .msg = "implicit conversion of non-finite value from {s} is undefined", .extra = .str, .kind = .off, .opt = W("float-overflow-conversion") }, .{ .msg = "implicit conversion of out of range value from {s} is undefined", .extra = .str, .kind = .warning, .opt = W("literal-conversion") }, .{ .msg = "implicit conversion from {s}", .extra = .str, .kind = .off, .opt = W("float-zero-conversion") }, @@ -929,9 +945,10 @@ pub const Tag = enum { .{ .msg = "this declarator", .kind = .note }, .{ .msg = "{s} is not supported on this target", .extra = .str, .kind = .@"error" }, .{ .msg = "'_BitInt' in C17 and earlier is a Clang extension'", .kind = .off, .pedantic = true, .opt = W("bit-int-extension"), .suppress_version = .c23 }, - .{ .msg = "{s} must have a bit size of at least 1", .extra = .str, .kind = .@"error" }, - .{ .msg = "{s} must have a bit size of at least 2", .extra = .str, .kind = .@"error" }, - .{ .msg = "{s} of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}unsigned _BitInt must have a bit size of at least 1", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}signed _BitInt must have a bit size of at least 2", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}unsigned _BitInt of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}signed _BitInt of bit sizes 
greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" }, .{ .msg = "keyword is hidden by macro definition", .kind = .off, .pedantic = true, .opt = W("keyword-macro") }, .{ .msg = "arithmetic on a pointer to an incomplete type '{s}'", .extra = .str, .kind = .@"error" }, .{ .msg = "'{s}' calling convention is not supported for this target", .extra = .str, .opt = W("ignored-attributes"), .kind = .warning }, @@ -1015,6 +1032,9 @@ pub const Tag = enum { .{ .msg = "ISO C does not support '~' for complex conjugation of '{s}'", .opt = W("pedantic"), .extra = .str, .kind = .off }, .{ .msg = "operand argument to overflow builtin must be an integer ('{s}' invalid)", .extra = .str, .kind = .@"error" }, .{ .msg = "result argument to overflow builtin must be a pointer to a non-const integer ('{s}' invalid)", .extra = .str, .kind = .@"error" }, + .{ .msg = "TODO: implement '{s}' attribute for {s}", .extra = .attribute_todo, .kind = .@"error" }, + .{ .msg = "non-integral type '{s}' is an invalid underlying type", .extra = .str, .kind = .@"error" }, + .{ .msg = "variable '{s}' declared with deduced type '__auto_type' cannot appear in its own initializer", .extra = .str, .kind = .@"error" }, }; }; }; diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index 6876395b8a..7bdfd2c81e 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -47,6 +47,20 @@ color: ?bool = null, nobuiltininc: bool = false, nostdinc: bool = false, nostdlibinc: bool = false, +debug_dump_letters: packed struct(u3) { + d: bool = false, + m: bool = false, + n: bool = false, + + /// According to GCC, specifying letters whose behavior conflicts is undefined. + /// We follow clang in that `-dM` always takes precedence over `-dD` + pub fn getPreprocessorDumpMode(self: @This()) Preprocessor.DumpMode { + if (self.m) return .macros_only; + if (self.d) return .macros_and_result; + if (self.n) return .macro_names_and_result; + return .result_only; + } +} = .{}, /// Full path to the aro executable aro_name: []const u8 = "", @@ -92,6 +106,9 @@ pub const usage = \\ \\Compile options: \\ -c, --compile Only run preprocess, compile, and assemble steps + \\ -dM Output #define directives for all the macros defined during the execution of the preprocessor + \\ -dD Like -dM except that it outputs both the #define directives and the result of preprocessing + \\ -dN Like -dD, but emit only the macro names, not their expansions. 
\\ -D <macro>=<value> Define <macro> to <value> (defaults to 1) \\ -E Only run the preprocessor \\ -fchar8_t Enable char8_t (enabled by default in C23 and later) @@ -234,6 +251,12 @@ pub fn parseArgs( d.system_defines = .no_system_defines; } else if (mem.eql(u8, arg, "-c") or mem.eql(u8, arg, "--compile")) { d.only_compile = true; + } else if (mem.eql(u8, arg, "-dD")) { + d.debug_dump_letters.d = true; + } else if (mem.eql(u8, arg, "-dM")) { + d.debug_dump_letters.m = true; + } else if (mem.eql(u8, arg, "-dN")) { + d.debug_dump_letters.n = true; } else if (mem.eql(u8, arg, "-E")) { d.only_preprocess = true; } else if (mem.eql(u8, arg, "-P") or mem.eql(u8, arg, "--no-line-commands")) { @@ -636,13 +659,17 @@ fn processSource( if (d.comp.langopts.ms_extensions) { d.comp.ms_cwd_source_id = source.id; } - + const dump_mode = d.debug_dump_letters.getPreprocessorDumpMode(); if (d.verbose_pp) pp.verbose = true; if (d.only_preprocess) { pp.preserve_whitespace = true; if (d.line_commands) { pp.linemarkers = if (d.use_line_directives) .line_directives else .numeric_directives; } + switch (dump_mode) { + .macros_and_result, .macro_names_and_result => pp.store_macro_tokens = true, + .result_only, .macros_only => {}, + } } try pp.preprocessSources(&.{ source, builtin, user_macros }); @@ -663,7 +690,8 @@ fn processSource( defer if (d.output_name != null) file.close(); var buf_w = std.io.bufferedWriter(file.writer()); - pp.prettyPrintTokens(buf_w.writer()) catch |er| + + pp.prettyPrintTokens(buf_w.writer(), dump_mode) catch |er| return d.fatal("unable to write result: {s}", .{errorDescription(er)}); buf_w.flush() catch |er| diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig index a81f313753..07cbeac03c 100644 --- a/lib/compiler/aro/aro/Driver/Filesystem.zig +++ b/lib/compiler/aro/aro/Driver/Filesystem.zig @@ -56,7 +56,7 @@ fn existsFake(entries: []const Filesystem.Entry, path: []const u8) bool { } fn canExecutePosix(path: []const u8) bool { - std.os.access(path, std.os.X_OK) catch return false; + std.posix.access(path, std.posix.X_OK) catch return false; // Todo: ensure path is not a directory return true; } @@ -173,7 +173,7 @@ pub const Filesystem = union(enum) { pub fn exists(fs: Filesystem, path: []const u8) bool { switch (fs) { .real => { - std.os.access(path, std.os.F_OK) catch return false; + std.fs.cwd().access(path, .{}) catch return false; return true; }, .fake => |paths| return existsFake(paths, path), diff --git a/lib/compiler/aro/aro/Hideset.zig b/lib/compiler/aro/aro/Hideset.zig index 433be9f393..ad8a089ae6 100644 --- a/lib/compiler/aro/aro/Hideset.zig +++ b/lib/compiler/aro/aro/Hideset.zig @@ -46,15 +46,15 @@ const Item = struct { const List = std.MultiArrayList(Item); }; -const Index = enum(u32) { +pub const Index = enum(u32) { none = std.math.maxInt(u32), _, }; map: std.AutoHashMapUnmanaged(Identifier, Index) = .{}, -/// Used for computing intersection of two lists; stored here so that allocations can be retained +/// Used for computing union/intersection of two lists; stored here so that allocations can be retained /// until hideset is deinit'ed -intersection_map: std.AutoHashMapUnmanaged(Identifier, void) = .{}, +tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{}, linked_list: Item.List = .{}, comp: *const Compilation, @@ -72,7 +72,7 @@ const Iterator = struct { pub fn deinit(self: *Hideset) void { self.map.deinit(self.comp.gpa); - self.intersection_map.deinit(self.comp.gpa); + self.tmp_map.deinit(self.comp.gpa); 
self.linked_list.deinit(self.comp.gpa); } @@ -83,7 +83,7 @@ pub fn clearRetainingCapacity(self: *Hideset) void { pub fn clearAndFree(self: *Hideset) void { self.map.clearAndFree(self.comp.gpa); - self.intersection_map.clearAndFree(self.comp.gpa); + self.tmp_map.clearAndFree(self.comp.gpa); self.linked_list.shrinkAndFree(self.comp.gpa, 0); } @@ -109,8 +109,13 @@ fn ensureUnusedCapacity(self: *Hideset, new_size: usize) !void { /// Creates a one-item list with contents `identifier` fn createNodeAssumeCapacity(self: *Hideset, identifier: Identifier) Index { + return self.createNodeAssumeCapacityExtra(identifier, .none); +} + +/// Creates a one-item list with contents `identifier` +fn createNodeAssumeCapacityExtra(self: *Hideset, identifier: Identifier, next: Index) Index { const next_idx = self.linked_list.len; - self.linked_list.appendAssumeCapacity(.{ .identifier = identifier }); + self.linked_list.appendAssumeCapacity(.{ .identifier = identifier, .next = next }); return @enumFromInt(next_idx); } @@ -121,24 +126,24 @@ pub fn prepend(self: *Hideset, loc: Source.Location, tail: Index) !Index { return @enumFromInt(new_idx); } -/// Copy a, then attach b at the end +/// Attach elements of `b` to the front of `a` (if they're not in `a`) pub fn @"union"(self: *Hideset, a: Index, b: Index) !Index { - var cur: Index = .none; + if (a == .none) return b; + if (b == .none) return a; + self.tmp_map.clearRetainingCapacity(); + + var it = self.iterator(b); + while (it.next()) |identifier| { + try self.tmp_map.put(self.comp.gpa, identifier, {}); + } + var head: Index = b; try self.ensureUnusedCapacity(self.len(a)); - var it = self.iterator(a); + it = self.iterator(a); while (it.next()) |identifier| { - const new_idx = self.createNodeAssumeCapacity(identifier); - if (head == b) { - head = new_idx; + if (!self.tmp_map.contains(identifier)) { + head = self.createNodeAssumeCapacityExtra(identifier, head); } - if (cur != .none) { - self.linked_list.items(.next)[@intFromEnum(cur)] = new_idx; - } - cur = new_idx; - } - if (cur != .none) { - self.linked_list.items(.next)[@intFromEnum(cur)] = b; } return head; } @@ -163,20 +168,20 @@ fn len(self: *const Hideset, list: Index) usize { pub fn intersection(self: *Hideset, a: Index, b: Index) !Index { if (a == .none or b == .none) return .none; - self.intersection_map.clearRetainingCapacity(); + self.tmp_map.clearRetainingCapacity(); var cur: Index = .none; var head: Index = .none; var it = self.iterator(a); var a_len: usize = 0; while (it.next()) |identifier| : (a_len += 1) { - try self.intersection_map.put(self.comp.gpa, identifier, {}); + try self.tmp_map.put(self.comp.gpa, identifier, {}); } try self.ensureUnusedCapacity(@min(a_len, self.len(b))); it = self.iterator(b); while (it.next()) |identifier| { - if (self.intersection_map.contains(identifier)) { + if (self.tmp_map.contains(identifier)) { const new_idx = self.createNodeAssumeCapacity(identifier); if (head == .none) { head = new_idx; diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index d8d6f9d71c..0a8907b23a 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -28,6 +28,7 @@ const StrInt = @import("StringInterner.zig"); const StringId = StrInt.StringId; const Builtins = @import("Builtins.zig"); const Builtin = Builtins.Builtin; +const evalBuiltin = @import("Builtins/eval.zig").eval; const target_util = @import("target.zig"); const Switch = struct { @@ -100,7 +101,7 @@ value_map: Tree.ValueMap, // buffers used during compilation syms: SymbolStack = 
.{}, -strings: std.ArrayList(u8), +strings: std.ArrayListAligned(u8, 4), labels: std.ArrayList(Label), list_buf: NodeList, decl_buf: NodeList, @@ -130,6 +131,10 @@ const_decl_folding: ConstDeclFoldingMode = .fold_const_decls, /// address-of-label expression (tracked with contains_address_of_label) computed_goto_tok: ?TokenIndex = null, +/// __auto_type may only be used with a single declarator. Keep track of the name +/// so that it is not used in its own initializer. +auto_type_decl_name: StringId = .empty, + /// Various variables that are different for each function. func: struct { /// null if not in function, will always be plain func, var_args_func or old_style_func @@ -160,7 +165,7 @@ record: struct { } fn addFieldsFromAnonymous(r: @This(), p: *Parser, ty: Type) Error!void { - for (ty.data.record.fields) |f| { + for (ty.getRecord().?.fields) |f| { if (f.isAnonymousRecord()) { try r.addFieldsFromAnonymous(p, f.ty.canonicalize(.standard)); } else if (f.name_tok != 0) { @@ -470,7 +475,7 @@ pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) ![]const return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]); } -pub fn floatValueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) ![]const u8 { +pub fn valueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) ![]const u8 { const strings_top = p.strings.items.len; defer p.strings.items.len = strings_top; @@ -572,6 +577,14 @@ fn nodeIs(p: *Parser, node: NodeIndex, tag: Tree.Tag) bool { return p.getNode(node, tag) != null; } +pub fn getDecayedStringLiteral(p: *Parser, node: NodeIndex) ?Value { + const cast_node = p.getNode(node, .implicit_cast) orelse return null; + const data = p.nodes.items(.data)[@intFromEnum(cast_node)]; + if (data.cast.kind != .array_to_pointer) return null; + const literal_node = p.getNode(data.cast.operand, .string_literal_expr) orelse return null; + return p.value_map.get(literal_node); +} + fn getNode(p: *Parser, node: NodeIndex, tag: Tree.Tag) ?NodeIndex { var cur = node; const tags = p.nodes.items(.tag); @@ -680,7 +693,7 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree { .gpa = pp.comp.gpa, .arena = arena.allocator(), .tok_ids = pp.tokens.items(.id), - .strings = std.ArrayList(u8).init(pp.comp.gpa), + .strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa), .value_map = Tree.ValueMap.init(pp.comp.gpa), .data = NodeList.init(pp.comp.gpa), .labels = std.ArrayList(Label).init(pp.comp.gpa), @@ -725,7 +738,7 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree { defer p.syms.popScope(); // NodeIndex 0 must be invalid - _ = try p.addNode(.{ .tag = .invalid, .ty = undefined, .data = undefined }); + _ = try p.addNode(.{ .tag = .invalid, .ty = undefined, .data = undefined, .loc = undefined }); { if (p.comp.langopts.hasChar8_T()) { @@ -747,6 +760,10 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree { if (ty.isArray()) ty.decayArray(); try p.syms.defineTypedef(&p, try StrInt.intern(p.comp, "__NSConstantString"), pp.comp.types.ns_constant_string.ty, 0, .none); + + if (p.comp.float80Type()) |float80_ty| { + try p.syms.defineTypedef(&p, try StrInt.intern(p.comp, "__float80"), float80_ty, 0, .none); + } } while (p.eatToken(.eof) == null) { @@ -862,6 +879,8 @@ fn nextExternDecl(p: *Parser) void { .keyword_int, .keyword_long, .keyword_signed, + .keyword_signed1, + .keyword_signed2, .keyword_unsigned, .keyword_float, .keyword_double, @@ -1018,10 +1037,8 @@ fn decl(p: *Parser) Error!bool { // Collect old style parameter 
declarations. if (init_d.d.old_style_func != null) { - const attrs = init_d.d.ty.getAttributes(); - var base_ty = if (init_d.d.ty.specifier == .attributed) init_d.d.ty.data.attributed.base else init_d.d.ty; + var base_ty = init_d.d.ty.base(); base_ty.specifier = .func; - init_d.d.ty = try base_ty.withAttributes(p.arena, attrs); const param_buf_top = p.param_buf.items.len; defer p.param_buf.items.len = param_buf_top; @@ -1116,6 +1133,7 @@ fn decl(p: *Parser) Error!bool { .ty = init_d.d.ty, .tag = try decl_spec.validateFnDef(p), .data = .{ .decl = .{ .name = init_d.d.name, .node = body } }, + .loc = @enumFromInt(init_d.d.name), }); try p.decl_buf.append(node); @@ -1142,9 +1160,18 @@ fn decl(p: *Parser) Error!bool { if (init_d.d.old_style_func) |tok_i| try p.errTok(.invalid_old_style_params, tok_i); const tag = try decl_spec.validate(p, &init_d.d.ty, init_d.initializer.node != .none); - const node = try p.addNode(.{ .ty = init_d.d.ty, .tag = tag, .data = .{ - .decl = .{ .name = init_d.d.name, .node = init_d.initializer.node }, - } }); + const tok = switch (decl_spec.storage_class) { + .auto, .@"extern", .register, .static, .typedef => |tok| tok, + .none => init_d.d.name, + }; + const node = try p.addNode(.{ + .ty = init_d.d.ty, + .tag = tag, + .data = .{ + .decl = .{ .name = init_d.d.name, .node = init_d.initializer.node }, + }, + .loc = @enumFromInt(tok), + }); try p.decl_buf.append(node); const interned_name = try StrInt.intern(p.comp, p.tokSlice(init_d.d.name)); @@ -1287,6 +1314,7 @@ fn staticAssert(p: *Parser) Error!bool { .lhs = res.node, .rhs = str.node, } }, + .loc = @enumFromInt(static_assert), }); try p.decl_buf.append(node); return true; @@ -1407,6 +1435,8 @@ fn typeof(p: *Parser) Error!?Type { const l_paren = try p.expectToken(.l_paren); if (try p.typeName()) |ty| { try p.expectClosing(l_paren, .r_paren); + if (ty.is(.invalid)) return null; + const typeof_ty = try p.arena.create(Type); typeof_ty.* = .{ .data = ty.data, @@ -1428,6 +1458,8 @@ fn typeof(p: *Parser) Error!?Type { .specifier = .nullptr_t, .qual = if (unqual) .{} else typeof_expr.ty.qual.inheritFromTypeof(), }; + } else if (typeof_expr.ty.is(.invalid)) { + return null; } const inner = try p.arena.create(Type.Expr); @@ -1774,6 +1806,8 @@ fn initDeclarator(p: *Parser, decl_spec: *DeclSpec, attr_buf_top: usize) Error!? } else { apply_var_attributes = true; } + const c23_auto = init_d.d.ty.is(.c23_auto); + const auto_type = init_d.d.ty.is(.auto_type); if (p.eatToken(.equal)) |eq| init: { if (decl_spec.storage_class == .typedef or @@ -1801,19 +1835,21 @@ fn initDeclarator(p: *Parser, decl_spec: *DeclSpec, attr_buf_top: usize) Error!? const interned_name = try StrInt.intern(p.comp, p.tokSlice(init_d.d.name)); try p.syms.declareSymbol(p, interned_name, init_d.d.ty, init_d.d.name, .none); + if (c23_auto or auto_type) { + p.auto_type_decl_name = interned_name; + } + defer p.auto_type_decl_name = .empty; + var init_list_expr = try p.initializer(init_d.d.ty); init_d.initializer = init_list_expr; if (!init_list_expr.ty.isArray()) break :init; - if (init_d.d.ty.specifier == .incomplete_array) { - // Modifying .data is exceptionally allowed for .incomplete_array. 
- init_d.d.ty.data.array.len = init_list_expr.ty.arrayLen() orelse break :init; - init_d.d.ty.specifier = .array; + if (init_d.d.ty.is(.incomplete_array)) { + init_d.d.ty.setIncompleteArrayLen(init_list_expr.ty.arrayLen() orelse break :init); } } const name = init_d.d.name; - const c23_auto = init_d.d.ty.is(.c23_auto); - if (init_d.d.ty.is(.auto_type) or c23_auto) { + if (auto_type or c23_auto) { if (init_d.initializer.node == .none) { init_d.d.ty = Type.invalid; if (c23_auto) { @@ -1872,6 +1908,8 @@ fn initDeclarator(p: *Parser, decl_spec: *DeclSpec, attr_buf_top: usize) Error!? /// | keyword_float /// | keyword_double /// | keyword_signed +/// | keyword_signed1 +/// | keyword_signed2 /// | keyword_unsigned /// | keyword_bool /// | keyword_c23_bool @@ -1911,14 +1949,13 @@ fn typeSpec(p: *Parser, ty: *Type.Builder) Error!bool { .keyword_long => try ty.combine(p, .long, p.tok_i), .keyword_int64, .keyword_int64_2 => try ty.combine(p, .long_long, p.tok_i), .keyword_int128 => try ty.combine(p, .int128, p.tok_i), - .keyword_signed => try ty.combine(p, .signed, p.tok_i), + .keyword_signed, .keyword_signed1, .keyword_signed2 => try ty.combine(p, .signed, p.tok_i), .keyword_unsigned => try ty.combine(p, .unsigned, p.tok_i), .keyword_fp16 => try ty.combine(p, .fp16, p.tok_i), .keyword_float16 => try ty.combine(p, .float16, p.tok_i), .keyword_float => try ty.combine(p, .float, p.tok_i), .keyword_double => try ty.combine(p, .double, p.tok_i), .keyword_complex => try ty.combine(p, .complex, p.tok_i), - .keyword_float80 => try ty.combine(p, .float80, p.tok_i), .keyword_float128_1, .keyword_float128_2 => { if (!p.comp.hasFloat128()) { try p.errStr(.type_not_supported_on_target, p.tok_i, p.tok_ids[p.tok_i].lexeme().?); @@ -2128,6 +2165,7 @@ fn recordSpec(p: *Parser) Error!Type { .tag = if (is_struct) .struct_forward_decl else .union_forward_decl, .ty = ty, .data = .{ .decl_ref = ident }, + .loc = @enumFromInt(ident), })); return ty; } @@ -2248,19 +2286,22 @@ fn recordSpec(p: *Parser) Error!Type { // TODO: msvc considers `#pragma pack` on a per-field basis .msvc => p.pragma_pack, }; - record_layout.compute(record_ty, ty, p.comp, pragma_pack_value); + record_layout.compute(record_ty, ty, p.comp, pragma_pack_value) catch |er| switch (er) { + error.Overflow => try p.errStr(.record_too_large, maybe_ident orelse kind_tok, try p.typeStr(ty)), + }; } // finish by creating a node var node: Tree.Node = .{ .tag = if (is_struct) .struct_decl_two else .union_decl_two, .ty = ty, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, + .loc = @enumFromInt(maybe_ident orelse kind_tok), }; switch (record_decls.len) { 0 => {}, - 1 => node.data = .{ .bin = .{ .lhs = record_decls[0], .rhs = .none } }, - 2 => node.data = .{ .bin = .{ .lhs = record_decls[0], .rhs = record_decls[1] } }, + 1 => node.data = .{ .two = .{ record_decls[0], .none } }, + 2 => node.data = .{ .two = .{ record_decls[0], record_decls[1] } }, else => { node.tag = if (is_struct) .struct_decl else .union_decl; node.data = .{ .range = try p.addList(record_decls) }; @@ -2383,6 +2424,7 @@ fn recordDeclarator(p: *Parser) Error!bool { .tag = .indirect_record_field_decl, .ty = ty, .data = undefined, + .loc = @enumFromInt(first_tok), }); try p.decl_buf.append(node); try p.record.addFieldsFromAnonymous(p, ty); @@ -2402,6 +2444,7 @@ fn recordDeclarator(p: *Parser) Error!bool { .tag = .record_field_decl, .ty = ty, .data = .{ .decl = .{ .name = name_tok, .node = bits_node } }, + .loc = @enumFromInt(if (name_tok != 0) name_tok 
else first_tok), }); try p.decl_buf.append(node); } @@ -2461,7 +2504,8 @@ fn enumSpec(p: *Parser) Error!Type { const maybe_ident = try p.eatIdentifier(); const fixed_ty = if (p.eatToken(.colon)) |colon| fixed: { - const fixed = (try p.typeName()) orelse { + const ty_start = p.tok_i; + const fixed = (try p.specQual()) orelse { if (p.record.kind != .invalid) { // This is a bit field. p.tok_i -= 1; @@ -2471,6 +2515,12 @@ fn enumSpec(p: *Parser) Error!Type { try p.errTok(.enum_fixed, colon); break :fixed null; }; + + if (!fixed.isInt() or fixed.is(.@"enum")) { + try p.errStr(.invalid_type_underlying_enum, ty_start, try p.typeStr(fixed)); + break :fixed Type.int; + } + try p.errTok(.enum_fixed, colon); break :fixed fixed; } else null; @@ -2505,6 +2555,7 @@ fn enumSpec(p: *Parser) Error!Type { .tag = .enum_forward_decl, .ty = ty, .data = .{ .decl_ref = ident }, + .loc = @enumFromInt(ident), })); return ty; } @@ -2587,7 +2638,7 @@ fn enumSpec(p: *Parser) Error!Type { continue; const symbol = p.syms.getPtr(field.name, .vars); - try symbol.val.intCast(dest_ty, p.comp); + _ = try symbol.val.intCast(dest_ty, p.comp); symbol.ty = dest_ty; p.nodes.items(.ty)[@intFromEnum(field_nodes[i])] = dest_ty; field.ty = dest_ty; @@ -2615,13 +2666,18 @@ fn enumSpec(p: *Parser) Error!Type { } // finish by creating a node - var node: Tree.Node = .{ .tag = .enum_decl_two, .ty = ty, .data = .{ - .bin = .{ .lhs = .none, .rhs = .none }, - } }; + var node: Tree.Node = .{ + .tag = .enum_decl_two, + .ty = ty, + .data = .{ + .two = .{ .none, .none }, + }, + .loc = @enumFromInt(maybe_ident orelse enum_tok), + }; switch (field_nodes.len) { 0 => {}, - 1 => node.data = .{ .bin = .{ .lhs = field_nodes[0], .rhs = .none } }, - 2 => node.data = .{ .bin = .{ .lhs = field_nodes[0], .rhs = field_nodes[1] } }, + 1 => node.data = .{ .two = .{ field_nodes[0], .none } }, + 2 => node.data = .{ .two = .{ field_nodes[0], field_nodes[1] } }, else => { node.tag = .enum_decl; node.data = .{ .range = try p.addList(field_nodes) }; @@ -2679,8 +2735,6 @@ const Enumerator = struct { return; } if (try e.res.val.add(e.res.val, Value.one, e.res.ty, p.comp)) { - const byte_size = e.res.ty.sizeof(p.comp).?; - const bit_size: u8 = @intCast(if (e.res.ty.isUnsignedInt(p.comp)) byte_size * 8 else byte_size * 8 - 1); if (e.fixed) { try p.errStr(.enum_not_representable_fixed, tok, try p.typeStr(e.res.ty)); return; @@ -2689,6 +2743,8 @@ const Enumerator = struct { try p.errTok(.enumerator_overflow, tok); break :blk larger; } else blk: { + const signed = !e.res.ty.isUnsignedInt(p.comp); + const bit_size: u8 = @intCast(e.res.ty.bitSizeof(p.comp).? 
- @intFromBool(signed)); try p.errExtra(.enum_not_representable, tok, .{ .pow_2_as_string = bit_size }); break :blk Type{ .specifier = .ulong_long }; }; @@ -2792,14 +2848,12 @@ fn enumerator(p: *Parser, e: *Enumerator) Error!?EnumFieldAndNode { if (err_start == p.comp.diagnostics.list.items.len) { // only do these warnings if we didn't already warn about overflow or non-representable values if (e.res.val.compare(.lt, Value.zero, p.comp)) { - const min_int = (Type{ .specifier = .int }).minInt(p.comp); - const min_val = try Value.int(min_int, p.comp); + const min_val = try Value.minInt(Type.int, p.comp); if (e.res.val.compare(.lt, min_val, p.comp)) { try p.errStr(.enumerator_too_small, name_tok, try e.res.str(p)); } } else { - const max_int = (Type{ .specifier = .int }).maxInt(p.comp); - const max_val = try Value.int(max_int, p.comp); + const max_val = try Value.maxInt(Type.int, p.comp); if (e.res.val.compare(.gt, max_val, p.comp)) { try p.errStr(.enumerator_too_large, name_tok, try e.res.str(p)); } @@ -2815,6 +2869,7 @@ fn enumerator(p: *Parser, e: *Enumerator) Error!?EnumFieldAndNode { .name = name_tok, .node = res.node, } }, + .loc = @enumFromInt(name_tok), }); try p.value_map.put(node, e.res.val); return EnumFieldAndNode{ .field = .{ @@ -2991,15 +3046,12 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato } const outer = try p.directDeclarator(base_type, d, kind); - var max_bits = p.comp.target.ptrBitWidth(); - if (max_bits > 61) max_bits = 61; - const max_bytes = (@as(u64, 1) << @truncate(max_bits)) - 1; if (!size.ty.isInt()) { try p.errStr(.array_size_non_int, size_tok, try p.typeStr(size.ty)); return error.ParsingFailed; } - if (base_type.is(.c23_auto)) { + if (base_type.is(.c23_auto) or outer.is(.invalid)) { // issue error later return Type.invalid; } else if (size.val.opt_ref == .none) { @@ -3030,7 +3082,7 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato } else { // `outer` is validated later so it may be invalid here const outer_size = outer.sizeof(p.comp); - const max_elems = max_bytes / @max(1, outer_size orelse 1); + const max_elems = p.comp.maxArrayBytes() / @max(1, outer_size orelse 1); var size_val = size.val; if (size_val.isZero(p.comp)) { @@ -3047,7 +3099,7 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato arr_ty.len = max_elems; } res_ty.data = .{ .array = arr_ty }; - res_ty.specifier = .array; + res_ty.specifier = if (static != null) .static_array else .array; } try res_ty.combine(outer); @@ -3120,12 +3172,14 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato fn pointer(p: *Parser, base_ty: Type) Error!Type { var ty = base_ty; while (p.eatToken(.asterisk)) |_| { - const elem_ty = try p.arena.create(Type); - elem_ty.* = ty; - ty = Type{ - .specifier = .pointer, - .data = .{ .sub_type = elem_ty }, - }; + if (!ty.is(.invalid)) { + const elem_ty = try p.arena.create(Type); + elem_ty.* = ty; + ty = Type{ + .specifier = .pointer, + .data = .{ .sub_type = elem_ty }, + }; + } var quals = Type.Qualifiers.Builder{}; _ = try p.typeQual(&quals); try quals.finish(p, &ty); @@ -3237,6 +3291,75 @@ fn typeName(p: *Parser) Error!?Type { return try Attribute.applyTypeAttributes(p, ty, attr_buf_top, .align_ignored); } +fn complexInitializer(p: *Parser, init_ty: Type) Error!Result { + assert(p.tok_ids[p.tok_i] == .l_brace); + assert(init_ty.isComplex()); + + const real_ty = init_ty.makeReal(); + if (real_ty.isInt()) { + return p.todo("Complex integer 
initializers"); + } + const l_brace = p.tok_i; + p.tok_i += 1; + try p.errTok(.complex_component_init, l_brace); + + const first_tok = p.tok_i; + var first = try p.assignExpr(); + try first.expect(p); + try p.coerceInit(&first, first_tok, real_ty); + + var second: Result = .{ + .ty = real_ty, + .val = Value.zero, + }; + if (p.eatToken(.comma)) |_| { + const second_tok = p.tok_i; + const maybe_second = try p.assignExpr(); + if (!maybe_second.empty(p)) { + second = maybe_second; + try p.coerceInit(&second, second_tok, real_ty); + } + } + + // Eat excess initializers + var extra_tok: ?TokenIndex = null; + while (p.eatToken(.comma)) |_| { + if (p.tok_ids[p.tok_i] == .r_brace) break; + extra_tok = p.tok_i; + const extra = try p.assignExpr(); + if (extra.empty(p)) { + try p.errTok(.expected_expr, p.tok_i); + p.skipTo(.r_brace); + return error.ParsingFailed; + } + } + try p.expectClosing(l_brace, .r_brace); + if (extra_tok) |tok| { + try p.errTok(.excess_scalar_init, tok); + } + + const arr_init_node: Tree.Node = .{ + .tag = .array_init_expr_two, + .ty = init_ty, + .data = .{ .two = .{ first.node, second.node } }, + .loc = @enumFromInt(l_brace), + }; + var res: Result = .{ + .node = try p.addNode(arr_init_node), + .ty = init_ty, + }; + if (first.val.opt_ref != .none and second.val.opt_ref != .none) { + res.val = try Value.intern(p.comp, switch (real_ty.bitSizeof(p.comp).?) { + 32 => .{ .complex = .{ .cf32 = .{ first.val.toFloat(f32, p.comp), second.val.toFloat(f32, p.comp) } } }, + 64 => .{ .complex = .{ .cf64 = .{ first.val.toFloat(f64, p.comp), second.val.toFloat(f64, p.comp) } } }, + 80 => .{ .complex = .{ .cf80 = .{ first.val.toFloat(f80, p.comp), second.val.toFloat(f80, p.comp) } } }, + 128 => .{ .complex = .{ .cf128 = .{ first.val.toFloat(f128, p.comp), second.val.toFloat(f128, p.comp) } } }, + else => unreachable, + }); + } + return res; +} + /// initializer /// : assignExpr /// | '{' initializerItems '}' @@ -3255,6 +3378,9 @@ fn initializer(p: *Parser, init_ty: Type) Error!Result { return error.ParsingFailed; } + if (init_ty.isComplex()) { + return p.complexInitializer(init_ty); + } var il: InitList = .{}; defer il.deinit(p.gpa); @@ -3754,9 +3880,15 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { var arr_init_node: Tree.Node = .{ .tag = .array_init_expr_two, .ty = init_ty, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, }; + const max_elems = p.comp.maxArrayBytes() / (@max(1, elem_ty.sizeof(p.comp) orelse 1)); + if (start > max_elems) { + try p.errTok(.array_too_large, il.tok); + start = max_elems; + } + if (init_ty.specifier == .incomplete_array) { arr_init_node.ty.specifier = .array; arr_init_node.ty.data.array.len = start; @@ -3767,8 +3899,6 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { .specifier = .array, .data = .{ .array = arr_ty }, }; - const attrs = init_ty.getAttributes(); - arr_init_node.ty = try arr_init_node.ty.withAttributes(p.arena, attrs); } else if (start < max_items) { const elem = try p.addNode(.{ .tag = .array_filler_expr, @@ -3781,8 +3911,8 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { const items = p.list_buf.items[list_buf_top..]; switch (items.len) { 0 => {}, - 1 => arr_init_node.data.bin.lhs = items[0], - 2 => arr_init_node.data.bin = .{ .lhs = items[0], .rhs = items[1] }, + 1 => arr_init_node.data.two[0] = items[0], + 2 => arr_init_node.data.two = .{ items[0], items[1] }, else => { arr_init_node.tag = 
.array_init_expr; arr_init_node.data = .{ .range = try p.addList(items) }; @@ -3813,13 +3943,13 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { var struct_init_node: Tree.Node = .{ .tag = .struct_init_expr_two, .ty = init_ty, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, }; const items = p.list_buf.items[list_buf_top..]; switch (items.len) { 0 => {}, - 1 => struct_init_node.data.bin.lhs = items[0], - 2 => struct_init_node.data.bin = .{ .lhs = items[0], .rhs = items[1] }, + 1 => struct_init_node.data.two[0] = items[0], + 2 => struct_init_node.data.two = .{ items[0], items[1] }, else => { struct_init_node.tag = .struct_init_expr; struct_init_node.data = .{ .range = try p.addList(items) }; @@ -3894,7 +4024,7 @@ fn asmOperand(p: *Parser, names: *std.ArrayList(?TokenIndex), constraints: *Node /// | asmStr ':' asmOperand* ':' asmOperand* /// | asmStr ':' asmOperand* ':' asmOperand* : asmStr? (',' asmStr)* /// | asmStr ':' asmOperand* ':' asmOperand* : asmStr? (',' asmStr)* : IDENTIFIER (',' IDENTIFIER)* -fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, l_paren: TokenIndex) Error!NodeIndex { +fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, asm_tok: TokenIndex, l_paren: TokenIndex) Error!NodeIndex { const asm_str = try p.asmStr(); try p.checkAsmStr(asm_str.val, l_paren); @@ -3903,6 +4033,7 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, l_paren: TokenIndex .tag = .gnu_asm_simple, .ty = .{ .specifier = .void }, .data = .{ .un = asm_str.node }, + .loc = @enumFromInt(asm_tok), }); } @@ -4007,6 +4138,7 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, l_paren: TokenIndex .tag = .addr_of_label, .data = .{ .decl_ref = label }, .ty = result_ty, + .loc = @enumFromInt(ident), }); try exprs.append(label_addr_node); @@ -4088,9 +4220,10 @@ fn assembly(p: *Parser, kind: enum { global, decl_label, stmt }) Error!?NodeInde .tag = .file_scope_asm, .ty = .{ .specifier = .void }, .data = .{ .decl = .{ .name = asm_tok, .node = asm_str.node } }, + .loc = @enumFromInt(asm_tok), }); }, - .stmt => result_node = try p.gnuAsmStmt(quals, l_paren), + .stmt => result_node = try p.gnuAsmStmt(quals, asm_tok, l_paren), } try p.expectClosing(l_paren, .r_paren); @@ -4141,7 +4274,7 @@ fn asmStr(p: *Parser) Error!Result { fn stmt(p: *Parser) Error!NodeIndex { if (try p.labeledStmt()) |some| return some; if (try p.compoundStmt(false, null)) |some| return some; - if (p.eatToken(.keyword_if)) |_| { + if (p.eatToken(.keyword_if)) |kw_if| { const l_paren = try p.expectToken(.l_paren); const cond_tok = p.tok_i; var cond = try p.expr(); @@ -4160,14 +4293,16 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .if_then_else_stmt, .data = .{ .if3 = .{ .cond = cond.node, .body = (try p.addList(&.{ then, @"else" })).start } }, + .loc = @enumFromInt(kw_if), }) else return try p.addNode(.{ .tag = .if_then_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = then } }, + .loc = @enumFromInt(kw_if), }); } - if (p.eatToken(.keyword_switch)) |_| { + if (p.eatToken(.keyword_switch)) |kw_switch| { const l_paren = try p.expectToken(.l_paren); const cond_tok = p.tok_i; var cond = try p.expr(); @@ -4197,9 +4332,10 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .switch_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = body } }, + .loc = @enumFromInt(kw_switch), }); } - if (p.eatToken(.keyword_while)) |_| { + if (p.eatToken(.keyword_while)) |kw_while| { const l_paren = try 
p.expectToken(.l_paren); const cond_tok = p.tok_i; var cond = try p.expr(); @@ -4221,9 +4357,10 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .while_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = body } }, + .loc = @enumFromInt(kw_while), }); } - if (p.eatToken(.keyword_do)) |_| { + if (p.eatToken(.keyword_do)) |kw_do| { const body = body: { const old_loop = p.in_loop; p.in_loop = true; @@ -4248,9 +4385,10 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .do_while_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = body } }, + .loc = @enumFromInt(kw_do), }); } - if (p.eatToken(.keyword_for)) |_| { + if (p.eatToken(.keyword_for)) |kw_for| { try p.syms.pushScope(p); defer p.syms.popScope(); const decl_buf_top = p.decl_buf.items.len; @@ -4301,16 +4439,22 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .for_decl_stmt, .data = .{ .range = .{ .start = start, .end = end } }, + .loc = @enumFromInt(kw_for), }); } else if (init.node == .none and cond.node == .none and incr.node == .none) { return try p.addNode(.{ .tag = .forever_stmt, .data = .{ .un = body }, + .loc = @enumFromInt(kw_for), }); - } else return try p.addNode(.{ .tag = .for_stmt, .data = .{ .if3 = .{ - .cond = body, - .body = (try p.addList(&.{ init.node, cond.node, incr.node })).start, - } } }); + } else return try p.addNode(.{ + .tag = .for_stmt, + .data = .{ .if3 = .{ + .cond = body, + .body = (try p.addList(&.{ init.node, cond.node, incr.node })).start, + } }, + .loc = @enumFromInt(kw_for), + }); } if (p.eatToken(.keyword_goto)) |goto_tok| { if (p.eatToken(.asterisk)) |_| { @@ -4338,7 +4482,7 @@ fn stmt(p: *Parser) Error!NodeIndex { } } - try e.un(p, .computed_goto_stmt); + try e.un(p, .computed_goto_stmt, goto_tok); _ = try p.expectToken(.semicolon); return e.node; } @@ -4351,17 +4495,18 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .goto_stmt, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(goto_tok), }); } if (p.eatToken(.keyword_continue)) |cont| { if (!p.in_loop) try p.errTok(.continue_not_in_loop, cont); _ = try p.expectToken(.semicolon); - return try p.addNode(.{ .tag = .continue_stmt, .data = undefined }); + return try p.addNode(.{ .tag = .continue_stmt, .data = undefined, .loc = @enumFromInt(cont) }); } if (p.eatToken(.keyword_break)) |br| { if (!p.in_loop and p.@"switch" == null) try p.errTok(.break_not_in_loop_or_switch, br); _ = try p.expectToken(.semicolon); - return try p.addNode(.{ .tag = .break_stmt, .data = undefined }); + return try p.addNode(.{ .tag = .break_stmt, .data = undefined, .loc = @enumFromInt(br) }); } if (try p.returnStmt()) |some| return some; if (try p.assembly(.stmt)) |some| return some; @@ -4380,8 +4525,8 @@ fn stmt(p: *Parser) Error!NodeIndex { defer p.attr_buf.len = attr_buf_top; try p.attributeSpecifier(); - if (p.eatToken(.semicolon)) |_| { - var null_node: Tree.Node = .{ .tag = .null_stmt, .data = undefined }; + if (p.eatToken(.semicolon)) |semicolon| { + var null_node: Tree.Node = .{ .tag = .null_stmt, .data = undefined, .loc = @enumFromInt(semicolon) }; null_node.ty = try Attribute.applyStatementAttributes(p, null_node.ty, expr_start, attr_buf_top); return p.addNode(null_node); } @@ -4422,6 +4567,7 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { var labeled_stmt = Tree.Node{ .tag = .labeled_stmt, .data = .{ .decl = .{ .name = name_tok, .node = try p.labelableStmt() } }, + .loc = @enumFromInt(name_tok), }; labeled_stmt.ty = try Attribute.applyLabelAttributes(p, labeled_stmt.ty, 
attr_buf_top); return try p.addNode(labeled_stmt); @@ -4464,9 +4610,11 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { if (second_item) |some| return try p.addNode(.{ .tag = .case_range_stmt, .data = .{ .if3 = .{ .cond = s, .body = (try p.addList(&.{ first_item.node, some.node })).start } }, + .loc = @enumFromInt(case), }) else return try p.addNode(.{ .tag = .case_stmt, .data = .{ .bin = .{ .lhs = first_item.node, .rhs = s } }, + .loc = @enumFromInt(case), }); } else if (p.eatToken(.keyword_default)) |default| { _ = try p.expectToken(.colon); @@ -4474,6 +4622,7 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { const node = try p.addNode(.{ .tag = .default_stmt, .data = .{ .un = s }, + .loc = @enumFromInt(default), }); const @"switch" = p.@"switch" orelse { try p.errStr(.case_not_in_switch, default, "default"); @@ -4492,7 +4641,7 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { fn labelableStmt(p: *Parser) Error!NodeIndex { if (p.tok_ids[p.tok_i] == .r_brace) { try p.err(.label_compound_end); - return p.addNode(.{ .tag = .null_stmt, .data = undefined }); + return p.addNode(.{ .tag = .null_stmt, .data = undefined, .loc = @enumFromInt(p.tok_i) }); } return p.stmt(); } @@ -4557,6 +4706,7 @@ fn compoundStmt(p: *Parser, is_fn_body: bool, stmt_expr_state: ?*StmtExprState) else => {}, } } + const r_brace = p.tok_i - 1; if (noreturn_index) |some| { // if new labels were defined we cannot be certain that the code is unreachable @@ -4580,7 +4730,7 @@ fn compoundStmt(p: *Parser, is_fn_body: bool, stmt_expr_state: ?*StmtExprState) try p.errStr(.func_does_not_return, p.tok_i - 1, func_name); } } - try p.decl_buf.append(try p.addNode(.{ .tag = .implicit_return, .ty = p.func.ty.?.returnType(), .data = .{ .return_zero = return_zero } })); + try p.decl_buf.append(try p.addNode(.{ .tag = .implicit_return, .ty = p.func.ty.?.returnType(), .data = .{ .return_zero = return_zero }, .loc = @enumFromInt(r_brace) })); } if (p.func.ident) |some| try p.decl_buf.insert(decl_buf_top, some.node); if (p.func.pretty_ident) |some| try p.decl_buf.insert(decl_buf_top, some.node); @@ -4588,13 +4738,14 @@ fn compoundStmt(p: *Parser, is_fn_body: bool, stmt_expr_state: ?*StmtExprState) var node: Tree.Node = .{ .tag = .compound_stmt_two, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, + .loc = @enumFromInt(l_brace), }; const statements = p.decl_buf.items[decl_buf_top..]; switch (statements.len) { 0 => {}, - 1 => node.data = .{ .bin = .{ .lhs = statements[0], .rhs = .none } }, - 2 => node.data = .{ .bin = .{ .lhs = statements[0], .rhs = statements[1] } }, + 1 => node.data = .{ .two = .{ statements[0], .none } }, + 2 => node.data = .{ .two = .{ statements[0], statements[1] } }, else => { node.tag = .compound_stmt; node.data = .{ .range = try p.addList(statements) }; @@ -4618,8 +4769,8 @@ fn nodeIsNoreturn(p: *Parser, node: NodeIndex) NoreturnKind { }, .compound_stmt_two => { const data = p.nodes.items(.data)[@intFromEnum(node)]; - const lhs_type = if (data.bin.lhs != .none) p.nodeIsNoreturn(data.bin.lhs) else .no; - const rhs_type = if (data.bin.rhs != .none) p.nodeIsNoreturn(data.bin.rhs) else .no; + const lhs_type = if (data.two[0] != .none) p.nodeIsNoreturn(data.two[0]) else .no; + const rhs_type = if (data.two[1] != .none) p.nodeIsNoreturn(data.two[1]) else .no; if (lhs_type == .complex or rhs_type == .complex) return .complex; if (lhs_type == .yes or rhs_type == .yes) return .yes; return .no; @@ -4704,6 +4855,8 @@ fn nextStmt(p: *Parser, l_brace: TokenIndex) !void { .keyword_int, 
.keyword_long, .keyword_signed, + .keyword_signed1, + .keyword_signed2, .keyword_unsigned, .keyword_float, .keyword_double, @@ -4743,17 +4896,17 @@ fn returnStmt(p: *Parser) Error!?NodeIndex { if (e.node == .none) { if (!ret_ty.is(.void)) try p.errStr(.func_should_return, ret_tok, p.tokSlice(p.func.name)); - return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node } }); + return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node }, .loc = @enumFromInt(ret_tok) }); } else if (ret_ty.is(.void)) { try p.errStr(.void_func_returns_value, e_tok, p.tokSlice(p.func.name)); - return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node } }); + return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node }, .loc = @enumFromInt(ret_tok) }); } try e.lvalConversion(p); try e.coerce(p, ret_ty, e_tok, .ret); try e.saveValue(p); - return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node } }); + return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node }, .loc = @enumFromInt(ret_tok) }); } // ====== expressions ====== @@ -4802,7 +4955,6 @@ const CallExpr = union(enum) { } fn shouldPromoteVarArg(self: CallExpr, arg_idx: u32) bool { - @setEvalBranchQuota(2000); return switch (self) { .standard => true, .builtin => |builtin| switch (builtin.tag) { @@ -4810,10 +4962,13 @@ const CallExpr = union(enum) { Builtin.tagFromName("__va_start").?, Builtin.tagFromName("va_start").?, => arg_idx != 1, - Builtin.tagFromName("__builtin_complex").?, Builtin.tagFromName("__builtin_add_overflow").?, - Builtin.tagFromName("__builtin_sub_overflow").?, + Builtin.tagFromName("__builtin_complex").?, + Builtin.tagFromName("__builtin_isinf").?, + Builtin.tagFromName("__builtin_isinf_sign").?, Builtin.tagFromName("__builtin_mul_overflow").?, + Builtin.tagFromName("__builtin_isnan").?, + Builtin.tagFromName("__builtin_sub_overflow").?, => false, else => true, }, @@ -4827,7 +4982,6 @@ const CallExpr = union(enum) { } fn checkVarArg(self: CallExpr, p: *Parser, first_after: TokenIndex, param_tok: TokenIndex, arg: *Result, arg_idx: u32) !void { - @setEvalBranchQuota(10_000); if (self == .standard) return; const builtin_tok = p.nodes.items(.data)[@intFromEnum(self.builtin.node)].decl.name; @@ -4852,13 +5006,15 @@ const CallExpr = union(enum) { /// of arguments, `paramCountOverride` is used to tell us how many arguments we should actually expect to see for /// these custom-typechecked functions. 
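
[Illustrative note, not part of the patch: the float-classification builtins added to these lists take a single argument that is type-checked directly instead of going through default argument promotion, e.g.:]

    #include <math.h>
    float f = NAN;
    int a = __builtin_isnan(f);              /* f stays float; no promotion to double */
    int b = __builtin_isinf_sign(-INFINITY); /* evaluates to -1, 0, or 1 */
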
fn paramCountOverride(self: CallExpr) ?u32 { - @setEvalBranchQuota(10_000); return switch (self) { .standard => null, .builtin => |builtin| switch (builtin.tag) { Builtin.tagFromName("__c11_atomic_thread_fence").?, Builtin.tagFromName("__c11_atomic_signal_fence").?, Builtin.tagFromName("__c11_atomic_is_lock_free").?, + Builtin.tagFromName("__builtin_isinf").?, + Builtin.tagFromName("__builtin_isinf_sign").?, + Builtin.tagFromName("__builtin_isnan").?, => 1, Builtin.tagFromName("__builtin_complex").?, @@ -4903,7 +5059,6 @@ const CallExpr = union(enum) { } fn returnType(self: CallExpr, p: *Parser, callable_ty: Type) Type { - @setEvalBranchQuota(6000); return switch (self) { .standard => callable_ty.returnType(), .builtin => |builtin| switch (builtin.tag) { @@ -4977,12 +5132,12 @@ const CallExpr = union(enum) { var call_node: Tree.Node = .{ .tag = .call_expr_one, .ty = ret_ty, - .data = .{ .bin = .{ .lhs = func_node, .rhs = .none } }, + .data = .{ .two = .{ func_node, .none } }, }; const args = p.list_buf.items[list_buf_top..]; switch (arg_count) { 0 => {}, - 1 => call_node.data.bin.rhs = args[1], // args[0] == func.node + 1 => call_node.data.two[1] = args[1], // args[0] == func.node else => { call_node.tag = .call_expr; call_node.data = .{ .range = try p.addList(args) }; @@ -5005,7 +5160,8 @@ const CallExpr = union(enum) { call_node.data = .{ .range = try p.addList(args) }; }, } - return Result{ .node = builtin.node, .ty = ret_ty }; + const val = try evalBuiltin(builtin.tag, p, args[1..]); + return Result{ .node = builtin.node, .ty = ret_ty, .val = val }; }, } } @@ -5016,6 +5172,8 @@ pub const Result = struct { ty: Type = .{ .specifier = .int }, val: Value = .{}, + const invalid: Result = .{ .ty = Type.invalid }; + pub fn str(res: Result, p: *Parser) ![]const u8 { switch (res.val.opt_ref) { .none => return "(none)", @@ -5073,30 +5231,21 @@ pub const Result = struct { .post_inc_expr, .post_dec_expr, => return, - .call_expr_one => { - const fn_ptr = p.nodes.items(.data)[@intFromEnum(cur_node)].bin.lhs; - const fn_ty = p.nodes.items(.ty)[@intFromEnum(fn_ptr)].elemType(); - const cast_info = p.nodes.items(.data)[@intFromEnum(fn_ptr)].cast.operand; - const decl_ref = p.nodes.items(.data)[@intFromEnum(cast_info)].decl_ref; - if (fn_ty.hasAttribute(.nodiscard)) try p.errStr(.nodiscard_unused, expr_start, p.tokSlice(decl_ref)); - if (fn_ty.hasAttribute(.warn_unused_result)) try p.errStr(.warn_unused_result, expr_start, p.tokSlice(decl_ref)); - return; - }, - .call_expr => { - const fn_ptr = p.data.items[p.nodes.items(.data)[@intFromEnum(cur_node)].range.start]; - const fn_ty = p.nodes.items(.ty)[@intFromEnum(fn_ptr)].elemType(); - const cast_info = p.nodes.items(.data)[@intFromEnum(fn_ptr)].cast.operand; - const decl_ref = p.nodes.items(.data)[@intFromEnum(cast_info)].decl_ref; - if (fn_ty.hasAttribute(.nodiscard)) try p.errStr(.nodiscard_unused, expr_start, p.tokSlice(decl_ref)); - if (fn_ty.hasAttribute(.warn_unused_result)) try p.errStr(.warn_unused_result, expr_start, p.tokSlice(decl_ref)); + .call_expr, .call_expr_one => { + const tmp_tree = p.tmpTree(); + const child_nodes = tmp_tree.childNodes(cur_node); + const fn_ptr = child_nodes[0]; + const call_info = tmp_tree.callableResultUsage(fn_ptr) orelse return; + if (call_info.nodiscard) try p.errStr(.nodiscard_unused, expr_start, p.tokSlice(call_info.tok)); + if (call_info.warn_unused_result) try p.errStr(.warn_unused_result, expr_start, p.tokSlice(call_info.tok)); return; }, .stmt_expr => { const body = 
p.nodes.items(.data)[@intFromEnum(cur_node)].un; switch (p.nodes.items(.tag)[@intFromEnum(body)]) { .compound_stmt_two => { - const body_stmt = p.nodes.items(.data)[@intFromEnum(body)].bin; - cur_node = if (body_stmt.rhs != .none) body_stmt.rhs else body_stmt.lhs; + const body_stmt = p.nodes.items(.data)[@intFromEnum(body)].two; + cur_node = if (body_stmt[1] != .none) body_stmt[1] else body_stmt[0]; }, .compound_stmt => { const data = p.nodes.items(.data)[@intFromEnum(body)]; @@ -5112,29 +5261,31 @@ pub const Result = struct { try p.errTok(.unused_value, expr_start); } - fn boolRes(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result) !void { + fn boolRes(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result, tok_i: TokenIndex) !void { if (lhs.val.opt_ref == .null) { lhs.val = Value.zero; } if (lhs.ty.specifier != .invalid) { lhs.ty = Type.int; } - return lhs.bin(p, tag, rhs); + return lhs.bin(p, tag, rhs, tok_i); } - fn bin(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result) !void { + fn bin(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result, tok_i: TokenIndex) !void { lhs.node = try p.addNode(.{ .tag = tag, .ty = lhs.ty, .data = .{ .bin = .{ .lhs = lhs.node, .rhs = rhs.node } }, + .loc = @enumFromInt(tok_i), }); } - fn un(operand: *Result, p: *Parser, tag: Tree.Tag) Error!void { + fn un(operand: *Result, p: *Parser, tag: Tree.Tag, tok_i: TokenIndex) Error!void { operand.node = try p.addNode(.{ .tag = tag, .ty = operand.ty, .data = .{ .un = operand.node }, + .loc = @enumFromInt(tok_i), }); } @@ -5368,10 +5519,14 @@ pub const Result = struct { fn lvalConversion(res: *Result, p: *Parser) Error!void { if (res.ty.isFunc()) { - const elem_ty = try p.arena.create(Type); - elem_ty.* = res.ty; - res.ty.specifier = .pointer; - res.ty.data = .{ .sub_type = elem_ty }; + if (res.ty.isInvalidFunc()) { + res.ty = .{ .specifier = .invalid }; + } else { + const elem_ty = try p.arena.create(Type); + elem_ty.* = res.ty; + res.ty.specifier = .pointer; + res.ty.data = .{ .sub_type = elem_ty }; + } try res.implicitCast(p, .function_to_pointer); } else if (res.ty.isArray()) { res.val = .{}; @@ -5455,7 +5610,14 @@ pub const Result = struct { try res.implicitCast(p, .complex_float_to_complex_int); } } else if (!res.ty.eql(int_ty, p.comp, true)) { - try res.val.intCast(int_ty, p.comp); + const old_val = res.val; + const value_change_kind = try res.val.intCast(int_ty, p.comp); + switch (value_change_kind) { + .none => {}, + .truncated => try p.errStr(.int_value_changed, tok, try p.valueChangedStr(res, old_val, int_ty)), + .sign_changed => try p.errStr(.sign_conversion, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), + } + const old_real = res.ty.isReal(); const new_real = int_ty.isReal(); if (old_real and new_real) { @@ -5486,8 +5648,8 @@ pub const Result = struct { .none => return p.errStr(.float_to_int, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), .out_of_range => return p.errStr(.float_out_of_range, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), .overflow => return p.errStr(.float_overflow_conversion, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), - .nonzero_to_zero => return p.errStr(.float_zero_conversion, tok, try p.floatValueChangedStr(res, old_value, int_ty)), - .value_changed => return p.errStr(.float_value_changed, tok, try p.floatValueChangedStr(res, old_value, int_ty)), + .nonzero_to_zero => return p.errStr(.float_zero_conversion, tok, try p.valueChangedStr(res, old_value, int_ty)), + .value_changed => return p.errStr(.float_value_changed, tok, try p.valueChangedStr(res, 
old_value, int_ty)), } } @@ -5555,7 +5717,7 @@ pub const Result = struct { res.ty = ptr_ty; try res.implicitCast(p, .bool_to_pointer); } else if (res.ty.isInt()) { - try res.val.intCast(ptr_ty, p.comp); + _ = try res.val.intCast(ptr_ty, p.comp); res.ty = ptr_ty; try res.implicitCast(p, .int_to_pointer); } @@ -5620,16 +5782,14 @@ pub const Result = struct { // if either is a float cast to that type if (a.ty.isFloat() or b.ty.isFloat()) { - const float_types = [7][2]Type.Specifier{ + const float_types = [6][2]Type.Specifier{ .{ .complex_long_double, .long_double }, .{ .complex_float128, .float128 }, - .{ .complex_float80, .float80 }, .{ .complex_double, .double }, .{ .complex_float, .float }, // No `_Complex __fp16` type .{ .invalid, .fp16 }, - // No `_Complex _Float16` - .{ .invalid, .float16 }, + .{ .complex_float16, .float16 }, }; const a_spec = a.ty.canonicalize(.standard).specifier; const b_spec = b.ty.canonicalize(.standard).specifier; @@ -5647,7 +5807,7 @@ pub const Result = struct { if (try a.floatConversion(b, a_spec, b_spec, p, float_types[3])) return; if (try a.floatConversion(b, a_spec, b_spec, p, float_types[4])) return; if (try a.floatConversion(b, a_spec, b_spec, p, float_types[5])) return; - if (try a.floatConversion(b, a_spec, b_spec, p, float_types[6])) return; + unreachable; } if (a.ty.eql(b.ty, p.comp, true)) { @@ -5875,6 +6035,10 @@ pub const Result = struct { if (to.is(.bool)) { res.val.boolCast(p.comp); } else if (old_float and new_int) { + if (to.hasIncompleteSize()) { + try p.errStr(.cast_to_incomplete_type, l_paren, try p.typeStr(to)); + return error.ParsingFailed; + } // Explicit cast, no conversion warning _ = try res.val.floatToInt(to, p.comp); } else if (new_float and old_int) { @@ -5886,7 +6050,7 @@ pub const Result = struct { try p.errStr(.cast_to_incomplete_type, l_paren, try p.typeStr(to)); return error.ParsingFailed; } - try res.val.intCast(to, p.comp); + _ = try res.val.intCast(to, p.comp); } } else if (to.get(.@"union")) |union_ty| { if (union_ty.data.record.hasFieldOfType(res.ty, p.comp)) { @@ -5918,12 +6082,13 @@ pub const Result = struct { .tag = .explicit_cast, .ty = res.ty, .data = .{ .cast = .{ .operand = res.node, .kind = cast_kind } }, + .loc = @enumFromInt(l_paren), }); } fn intFitsInType(res: Result, p: *Parser, ty: Type) !bool { - const max_int = try Value.int(ty.maxInt(p.comp), p.comp); - const min_int = try Value.int(ty.minInt(p.comp), p.comp); + const max_int = try Value.maxInt(ty, p.comp); + const min_int = try Value.minInt(ty, p.comp); return res.val.compare(.lte, max_int, p.comp) and (res.ty.isUnsignedInt(p.comp) or res.val.compare(.gte, min_int, p.comp)); } @@ -6091,7 +6256,7 @@ fn expr(p: *Parser) Error!Result { var err_start = p.comp.diagnostics.list.items.len; var lhs = try p.assignExpr(); if (p.tok_ids[p.tok_i] == .comma) try lhs.expect(p); - while (p.eatToken(.comma)) |_| { + while (p.eatToken(.comma)) |comma| { try lhs.maybeWarnUnused(p, expr_start, err_start); expr_start = p.tok_i; err_start = p.comp.diagnostics.list.items.len; @@ -6101,7 +6266,7 @@ fn expr(p: *Parser) Error!Result { try rhs.lvalConversion(p); lhs.val = rhs.val; lhs.ty = rhs.ty; - try lhs.bin(p, .comma_expr, rhs); + try lhs.bin(p, .comma_expr, rhs, comma); } return lhs; } @@ -6183,7 +6348,7 @@ fn assignExpr(p: *Parser) Error!Result { } } _ = try lhs_copy.adjustTypes(tok, &rhs, p, if (tag == .mod_assign_expr) .integer else .arithmetic); - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; }, .sub_assign_expr, @@ -6194,7 +6359,7 @@ fn 
assignExpr(p: *Parser) Error!Result { } else { _ = try lhs_copy.adjustTypes(tok, &rhs, p, .arithmetic); } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; }, .shl_assign_expr, @@ -6204,7 +6369,7 @@ fn assignExpr(p: *Parser) Error!Result { .bit_or_assign_expr, => { _ = try lhs_copy.adjustTypes(tok, &rhs, p, .integer); - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; }, else => unreachable, @@ -6212,7 +6377,7 @@ fn assignExpr(p: *Parser) Error!Result { try rhs.coerce(p, lhs.ty, tok, .assign); - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; } @@ -6280,6 +6445,7 @@ fn condExpr(p: *Parser) Error!Result { .tag = .binary_cond_expr, .ty = cond.ty, .data = .{ .if3 = .{ .cond = cond.node, .body = (try p.addList(&.{ cond_then.node, then_expr.node })).start } }, + .loc = @enumFromInt(cond_tok), }); return cond; } @@ -6305,6 +6471,7 @@ fn condExpr(p: *Parser) Error!Result { .tag = .cond_expr, .ty = cond.ty, .data = .{ .if3 = .{ .cond = cond.node, .body = (try p.addList(&.{ then_expr.node, else_expr.node })).start } }, + .loc = @enumFromInt(cond_tok), }); return cond; } @@ -6324,8 +6491,10 @@ fn lorExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .boolean_logic)) { const res = lhs.val.toBool(p.comp) or rhs.val.toBool(p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, .bool_or_expr, rhs); + try lhs.boolRes(p, .bool_or_expr, rhs, tok); } return lhs; } @@ -6345,8 +6514,10 @@ fn landExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .boolean_logic)) { const res = lhs.val.toBool(p.comp) and rhs.val.toBool(p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, .bool_and_expr, rhs); + try lhs.boolRes(p, .bool_and_expr, rhs, tok); } return lhs; } @@ -6362,7 +6533,7 @@ fn orExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .integer)) { lhs.val = try lhs.val.bitOr(rhs.val, p.comp); } - try lhs.bin(p, .bit_or_expr, rhs); + try lhs.bin(p, .bit_or_expr, rhs, tok); } return lhs; } @@ -6378,7 +6549,7 @@ fn xorExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .integer)) { lhs.val = try lhs.val.bitXor(rhs.val, p.comp); } - try lhs.bin(p, .bit_xor_expr, rhs); + try lhs.bin(p, .bit_xor_expr, rhs, tok); } return lhs; } @@ -6394,7 +6565,7 @@ fn andExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .integer)) { lhs.val = try lhs.val.bitAnd(rhs.val, p.comp); } - try lhs.bin(p, .bit_and_expr, rhs); + try lhs.bin(p, .bit_and_expr, rhs, tok); } return lhs; } @@ -6414,8 +6585,10 @@ fn eqExpr(p: *Parser) Error!Result { const op: std.math.CompareOperator = if (tag == .equal_expr) .eq else .neq; const res = lhs.val.compare(op, rhs.val, p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, tag, rhs); + try lhs.boolRes(p, tag, rhs, ne.?); } return lhs; } @@ -6443,8 +6616,10 @@ fn compExpr(p: *Parser) Error!Result { }; const res = lhs.val.compare(op, rhs.val, p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, tag, rhs); + try lhs.boolRes(p, tag, rhs, ge.?); } return lhs; } @@ -6474,7 +6649,7 @@ fn shiftExpr(p: *Parser) Error!Result { lhs.val = try lhs.val.shr(rhs.val, lhs.ty, p.comp); } } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, shr.?); } return lhs; } @@ -6504,7 +6679,7 @@ fn addExpr(p: *Parser) Error!Result { try 
p.errStr(.ptr_arithmetic_incomplete, minus.?, try p.typeStr(lhs_ty.elemType())); lhs.ty = Type.invalid; } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, minus.?); } return lhs; } @@ -6538,7 +6713,7 @@ fn mulExpr(p: *Parser) Error!Result { lhs.ty.signedness(p.comp) != .unsigned) try p.errOverflow(mul.?, lhs); } else if (div != null) { if (try lhs.val.div(lhs.val, rhs.val, lhs.ty, p.comp) and - lhs.ty.signedness(p.comp) != .unsigned) try p.errOverflow(mul.?, lhs); + lhs.ty.signedness(p.comp) != .unsigned) try p.errOverflow(div.?, lhs); } else { var res = try Value.rem(lhs.val, rhs.val, lhs.ty, p.comp); if (res.opt_ref == .none) { @@ -6554,7 +6729,7 @@ fn mulExpr(p: *Parser) Error!Result { } } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, percent.?); } return lhs; } @@ -6573,7 +6748,7 @@ fn removeUnusedWarningForTok(p: *Parser, last_expr_tok: TokenIndex) void { } /// castExpr -/// : '(' compoundStmt ')' +/// : '(' compoundStmt ')' suffixExpr* /// | '(' typeName ')' castExpr /// | '(' typeName ')' '{' initializerItems '}' /// | __builtin_choose_expr '(' integerConstExpr ',' assignExpr ',' assignExpr ')' @@ -6584,6 +6759,7 @@ fn removeUnusedWarningForTok(p: *Parser, last_expr_tok: TokenIndex) void { fn castExpr(p: *Parser) Error!Result { if (p.eatToken(.l_paren)) |l_paren| cast_expr: { if (p.tok_ids[p.tok_i] == .l_brace) { + const tok = p.tok_i; try p.err(.gnu_statement_expression); if (p.func.ty == null) { try p.err(.stmt_expr_not_allowed_file_scope); @@ -6599,7 +6775,12 @@ fn castExpr(p: *Parser) Error!Result { .val = stmt_expr_state.last_expr_res.val, }; try p.expectClosing(l_paren, .r_paren); - try res.un(p, .stmt_expr); + try res.un(p, .stmt_expr, tok); + while (true) { + const suffix = try p.suffixExpr(res); + if (suffix.empty(p)) break; + res = suffix; + } return res; } const ty = (try p.typeName()) orelse { @@ -6634,23 +6815,26 @@ fn castExpr(p: *Parser) Error!Result { } fn typesCompatible(p: *Parser) Error!Result { + const builtin_tok = p.tok_i; p.tok_i += 1; const l_paren = try p.expectToken(.l_paren); + const first_tok = p.tok_i; const first = (try p.typeName()) orelse { try p.err(.expected_type); p.skipTo(.r_paren); return error.ParsingFailed; }; - const lhs = try p.addNode(.{ .tag = .invalid, .ty = first, .data = undefined }); + const lhs = try p.addNode(.{ .tag = .invalid, .ty = first, .data = undefined, .loc = @enumFromInt(first_tok) }); _ = try p.expectToken(.comma); + const second_tok = p.tok_i; const second = (try p.typeName()) orelse { try p.err(.expected_type); p.skipTo(.r_paren); return error.ParsingFailed; }; - const rhs = try p.addNode(.{ .tag = .invalid, .ty = second, .data = undefined }); + const rhs = try p.addNode(.{ .tag = .invalid, .ty = second, .data = undefined, .loc = @enumFromInt(second_tok) }); try p.expectClosing(l_paren, .r_paren); @@ -6665,10 +6849,15 @@ fn typesCompatible(p: *Parser) Error!Result { const res = Result{ .val = Value.fromBool(compatible), - .node = try p.addNode(.{ .tag = .builtin_types_compatible_p, .ty = Type.int, .data = .{ .bin = .{ - .lhs = lhs, - .rhs = rhs, - } } }), + .node = try p.addNode(.{ + .tag = .builtin_types_compatible_p, + .ty = Type.int, + .data = .{ .bin = .{ + .lhs = lhs, + .rhs = rhs, + } }, + .loc = @enumFromInt(builtin_tok), + }), }; try p.value_map.put(res.node, res.val); return res; @@ -6786,11 +6975,11 @@ fn offsetofMemberDesignator(p: *Parser, base_ty: Type, want_bits: bool) Error!Re errdefer p.skipTo(.r_paren); const base_field_name_tok = try p.expectIdentifier(); const base_field_name = try 
StrInt.intern(p.comp, p.tokSlice(base_field_name_tok)); - try p.validateFieldAccess(base_ty, base_ty, base_field_name_tok, base_field_name); + const base_record_ty = base_ty.getRecord().?; + try p.validateFieldAccess(base_record_ty, base_ty, base_field_name_tok, base_field_name); const base_node = try p.addNode(.{ .tag = .default_init_expr, .ty = base_ty, .data = undefined }); var cur_offset: u64 = 0; - const base_record_ty = base_ty.canonicalize(.standard); var lhs = try p.fieldAccessExtra(base_node, base_record_ty, base_field_name, false, &cur_offset); var total_offset = cur_offset; @@ -6800,13 +6989,12 @@ fn offsetofMemberDesignator(p: *Parser, base_ty: Type, want_bits: bool) Error!Re const field_name_tok = try p.expectIdentifier(); const field_name = try StrInt.intern(p.comp, p.tokSlice(field_name_tok)); - if (!lhs.ty.isRecord()) { + const lhs_record_ty = lhs.ty.getRecord() orelse { try p.errStr(.offsetof_ty, field_name_tok, try p.typeStr(lhs.ty)); return error.ParsingFailed; - } - try p.validateFieldAccess(lhs.ty, lhs.ty, field_name_tok, field_name); - const record_ty = lhs.ty.canonicalize(.standard); - lhs = try p.fieldAccessExtra(lhs.node, record_ty, field_name, false, &cur_offset); + }; + try p.validateFieldAccess(lhs_record_ty, lhs.ty, field_name_tok, field_name); + lhs = try p.fieldAccessExtra(lhs.node, lhs_record_ty, field_name, false, &cur_offset); total_offset += cur_offset; }, .l_bracket => { @@ -6824,11 +7012,14 @@ fn offsetofMemberDesignator(p: *Parser, base_ty: Type, want_bits: bool) Error!Re try ptr.lvalConversion(p); try index.lvalConversion(p); - if (!index.ty.isInt()) try p.errTok(.invalid_index, l_bracket_tok); - try p.checkArrayBounds(index, lhs, l_bracket_tok); + if (index.ty.isInt()) { + try p.checkArrayBounds(index, lhs, l_bracket_tok); + } else { + try p.errTok(.invalid_index, l_bracket_tok); + } try index.saveValue(p); - try ptr.bin(p, .array_access_expr, index); + try ptr.bin(p, .array_access_expr, index, l_bracket_tok); lhs = ptr; }, else => break, @@ -6867,6 +7058,7 @@ fn unExpr(p: *Parser) Error!Result { .tag = .addr_of_label, .data = .{ .decl_ref = name_tok }, .ty = result_ty, + .loc = @enumFromInt(address_tok), }), .ty = result_ty, }; @@ -6886,19 +7078,21 @@ fn unExpr(p: *Parser) Error!Result { { if (tree.isBitfield(member_node)) try p.errTok(.addr_of_bitfield, tok); } - if (!tree.isLval(operand.node)) { + if (!tree.isLval(operand.node) and !operand.ty.is(.invalid)) { try p.errTok(.addr_of_rvalue, tok); } if (operand.ty.qual.register) try p.errTok(.addr_of_register, tok); - const elem_ty = try p.arena.create(Type); - elem_ty.* = operand.ty; - operand.ty = Type{ - .specifier = .pointer, - .data = .{ .sub_type = elem_ty }, - }; + if (!operand.ty.is(.invalid)) { + const elem_ty = try p.arena.create(Type); + elem_ty.* = operand.ty; + operand.ty = Type{ + .specifier = .pointer, + .data = .{ .sub_type = elem_ty }, + }; + } try operand.saveValue(p); - try operand.un(p, .addr_of_expr); + try operand.un(p, .addr_of_expr, tok); return operand; }, .asterisk => { @@ -6917,7 +7111,7 @@ fn unExpr(p: *Parser) Error!Result { try p.errStr(.deref_incomplete_ty_ptr, asterisk_loc, try p.typeStr(operand.ty)); } operand.ty.qual = .{}; - try operand.un(p, .deref_expr); + try operand.un(p, .deref_expr, tok); return operand; }, .plus => { @@ -6943,12 +7137,12 @@ fn unExpr(p: *Parser) Error!Result { try p.errStr(.invalid_argument_un, tok, try p.typeStr(operand.ty)); try operand.usualUnaryConversion(p, tok); - if (operand.val.is(.int, p.comp) or operand.val.is(.float, p.comp)) { + 
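
[Illustrative note, not part of the patch: switching the fold condition to "any arithmetic value" lets constant negation also cover complex operands, e.g. (using the GNU imaginary-constant extension):]

    double _Complex z = 1.0 + 2.0i;  /* GNU extension: imaginary constant */
    double _Complex w = -z;          /* negation can be folded when z's value is known */
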
if (operand.val.isArithmetic(p.comp)) { _ = try operand.val.sub(Value.zero, operand.val, operand.ty, p.comp); } else { operand.val = .{}; } - try operand.un(p, .negate_expr); + try operand.un(p, .negate_expr, tok); return operand; }, .plus_plus => { @@ -6974,7 +7168,7 @@ fn unExpr(p: *Parser) Error!Result { operand.val = .{}; } - try operand.un(p, .pre_inc_expr); + try operand.un(p, .pre_inc_expr, tok); return operand; }, .minus_minus => { @@ -7000,7 +7194,7 @@ fn unExpr(p: *Parser) Error!Result { operand.val = .{}; } - try operand.un(p, .pre_dec_expr); + try operand.un(p, .pre_dec_expr, tok); return operand; }, .tilde => { @@ -7016,11 +7210,14 @@ fn unExpr(p: *Parser) Error!Result { } } else if (operand.ty.isComplex()) { try p.errStr(.complex_conj, tok, try p.typeStr(operand.ty)); + if (operand.val.is(.complex, p.comp)) { + operand.val = try operand.val.complexConj(operand.ty, p.comp); + } } else { try p.errStr(.invalid_argument_un, tok, try p.typeStr(operand.ty)); operand.val = .{}; } - try operand.un(p, .bit_not_expr); + try operand.un(p, .bit_not_expr, tok); return operand; }, .bang => { @@ -7045,7 +7242,7 @@ fn unExpr(p: *Parser) Error!Result { } } operand.ty = .{ .specifier = .int }; - try operand.un(p, .bool_not_expr); + try operand.un(p, .bool_not_expr, tok); return operand; }, .keyword_sizeof => { @@ -7089,7 +7286,7 @@ fn unExpr(p: *Parser) Error!Result { res.ty = p.comp.types.size; } } - try res.un(p, .sizeof_expr); + try res.un(p, .sizeof_expr, tok); return res; }, .keyword_alignof, @@ -7127,7 +7324,7 @@ fn unExpr(p: *Parser) Error!Result { try p.errStr(.invalid_alignof, expected_paren, try p.typeStr(res.ty)); res.ty = Type.invalid; } - try res.un(p, .alignof_expr); + try res.un(p, .alignof_expr, tok); return res; }, .keyword_extension => { @@ -7147,15 +7344,18 @@ fn unExpr(p: *Parser) Error!Result { var operand = try p.castExpr(); try operand.expect(p); try operand.lvalConversion(p); + if (operand.ty.is(.invalid)) return Result.invalid; if (!operand.ty.isInt() and !operand.ty.isFloat()) { try p.errStr(.invalid_imag, imag_tok, try p.typeStr(operand.ty)); } - if (operand.ty.isReal()) { + if (operand.ty.isComplex()) { + operand.val = try operand.val.imaginaryPart(p.comp); + } else if (operand.ty.isReal()) { switch (p.comp.langopts.emulate) { .msvc => {}, // Doesn't support `_Complex` or `__imag` in the first place .gcc => operand.val = Value.zero, .clang => { - if (operand.val.is(.int, p.comp)) { + if (operand.val.is(.int, p.comp) or operand.val.is(.float, p.comp)) { operand.val = Value.zero; } else { operand.val = .{}; @@ -7165,7 +7365,7 @@ fn unExpr(p: *Parser) Error!Result { } // convert _Complex T to T operand.ty = operand.ty.makeReal(); - try operand.un(p, .imag_expr); + try operand.un(p, .imag_expr, tok); return operand; }, .keyword_real1, .keyword_real2 => { @@ -7175,12 +7375,14 @@ fn unExpr(p: *Parser) Error!Result { var operand = try p.castExpr(); try operand.expect(p); try operand.lvalConversion(p); + if (operand.ty.is(.invalid)) return Result.invalid; if (!operand.ty.isInt() and !operand.ty.isFloat()) { try p.errStr(.invalid_real, real_tok, try p.typeStr(operand.ty)); } // convert _Complex T to T operand.ty = operand.ty.makeReal(); - try operand.un(p, .real_expr); + operand.val = try operand.val.realPart(p.comp); + try operand.un(p, .real_expr, tok); return operand; }, else => { @@ -7253,7 +7455,7 @@ fn compoundLiteral(p: *Parser) Error!Result { if (d.constexpr) |_| { // TODO error if not constexpr } - try init_list_expr.un(p, tag); + try init_list_expr.un(p, tag, 
l_paren); return init_list_expr; } @@ -7284,7 +7486,7 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { } try operand.usualUnaryConversion(p, p.tok_i); - try operand.un(p, .post_inc_expr); + try operand.un(p, .post_inc_expr, p.tok_i); return operand; }, .minus_minus => { @@ -7302,7 +7504,7 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { } try operand.usualUnaryConversion(p, p.tok_i); - try operand.un(p, .post_dec_expr); + try operand.un(p, .post_dec_expr, p.tok_i); return operand; }, .l_bracket => { @@ -7319,12 +7521,18 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { try index.lvalConversion(p); if (ptr.ty.isPtr()) { ptr.ty = ptr.ty.elemType(); - if (!index.ty.isInt()) try p.errTok(.invalid_index, l_bracket); - try p.checkArrayBounds(index_before_conversion, array_before_conversion, l_bracket); + if (index.ty.isInt()) { + try p.checkArrayBounds(index_before_conversion, array_before_conversion, l_bracket); + } else { + try p.errTok(.invalid_index, l_bracket); + } } else if (index.ty.isPtr()) { index.ty = index.ty.elemType(); - if (!ptr.ty.isInt()) try p.errTok(.invalid_index, l_bracket); - try p.checkArrayBounds(array_before_conversion, index_before_conversion, l_bracket); + if (ptr.ty.isInt()) { + try p.checkArrayBounds(array_before_conversion, index_before_conversion, l_bracket); + } else { + try p.errTok(.invalid_index, l_bracket); + } std.mem.swap(Result, &ptr, &index); } else { try p.errTok(.invalid_subscript, l_bracket); @@ -7332,7 +7540,7 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { try ptr.saveValue(p); try index.saveValue(p); - try ptr.bin(p, .array_access_expr, index); + try ptr.bin(p, .array_access_expr, index, l_bracket); return ptr; }, .period => { @@ -7364,16 +7572,12 @@ fn fieldAccess( const expr_ty = lhs.ty; const is_ptr = expr_ty.isPtr(); const expr_base_ty = if (is_ptr) expr_ty.elemType() else expr_ty; - const record_ty = expr_base_ty.canonicalize(.standard); + const record_ty = expr_base_ty.getRecord() orelse { + try p.errStr(.expected_record_ty, field_name_tok, try p.typeStr(expr_ty)); + return error.ParsingFailed; + }; - switch (record_ty.specifier) { - .@"struct", .@"union" => {}, - else => { - try p.errStr(.expected_record_ty, field_name_tok, try p.typeStr(expr_ty)); - return error.ParsingFailed; - }, - } - if (record_ty.hasIncompleteSize()) { + if (record_ty.isIncomplete()) { try p.errStr(.deref_incomplete_ty_ptr, field_name_tok - 2, try p.typeStr(expr_base_ty)); return error.ParsingFailed; } @@ -7386,7 +7590,7 @@ fn fieldAccess( return p.fieldAccessExtra(lhs.node, record_ty, field_name, is_arrow, &discard); } -fn validateFieldAccess(p: *Parser, record_ty: Type, expr_ty: Type, field_name_tok: TokenIndex, field_name: StringId) Error!void { +fn validateFieldAccess(p: *Parser, record_ty: *const Type.Record, expr_ty: Type, field_name_tok: TokenIndex, field_name: StringId) Error!void { if (record_ty.hasField(field_name)) return; p.strings.items.len = 0; @@ -7401,8 +7605,8 @@ fn validateFieldAccess(p: *Parser, record_ty: Type, expr_ty: Type, field_name_to return error.ParsingFailed; } -fn fieldAccessExtra(p: *Parser, lhs: NodeIndex, record_ty: Type, field_name: StringId, is_arrow: bool, offset_bits: *u64) Error!Result { - for (record_ty.data.record.fields, 0..) |f, i| { +fn fieldAccessExtra(p: *Parser, lhs: NodeIndex, record_ty: *const Type.Record, field_name: StringId, is_arrow: bool, offset_bits: *u64) Error!Result { + for (record_ty.fields, 0..) 
|f, i| { if (f.isAnonymousRecord()) { if (!f.ty.hasField(field_name)) continue; const inner = try p.addNode(.{ @@ -7410,7 +7614,7 @@ fn fieldAccessExtra(p: *Parser, lhs: NodeIndex, record_ty: Type, field_name: Str .ty = f.ty, .data = .{ .member = .{ .lhs = lhs, .index = @intCast(i) } }, }); - const ret = p.fieldAccessExtra(inner, f.ty, field_name, false, offset_bits); + const ret = p.fieldAccessExtra(inner, f.ty.getRecord().?, field_name, false, offset_bits); offset_bits.* = f.layout.offset_bits; return ret; } @@ -7527,6 +7731,23 @@ fn callExpr(p: *Parser, lhs: Result) Error!Result { continue; } const p_ty = params[arg_count].ty; + if (p_ty.specifier == .static_array) { + const arg_array_len: u64 = arg.ty.arrayLen() orelse std.math.maxInt(u64); + const param_array_len: u64 = p_ty.arrayLen().?; + if (arg_array_len < param_array_len) { + const extra = Diagnostics.Message.Extra{ .arguments = .{ + .expected = @intCast(arg_array_len), + .actual = @intCast(param_array_len), + } }; + try p.errExtra(.array_argument_too_small, param_tok, extra); + try p.errTok(.callee_with_static_array, params[arg_count].name_tok); + } + if (arg.val.isZero(p.comp)) { + try p.errTok(.non_null_argument, param_tok); + try p.errTok(.callee_with_static_array, params[arg_count].name_tok); + } + } + if (call_expr.shouldCoerceArg(arg_count)) { try arg.coerce(p, p_ty, param_tok, .{ .arg = params[arg_count].name_tok }); } @@ -7618,7 +7839,7 @@ fn primaryExpr(p: *Parser) Error!Result { var e = try p.expr(); try e.expect(p); try p.expectClosing(l_paren, .r_paren); - try e.un(p, .paren_expr); + try e.un(p, .paren_expr, l_paren); return e; } switch (p.tok_ids[p.tok_i]) { @@ -7626,6 +7847,10 @@ fn primaryExpr(p: *Parser) Error!Result { const name_tok = try p.expectIdentifier(); const name = p.tokSlice(name_tok); const interned_name = try StrInt.intern(p.comp, name); + if (interned_name == p.auto_type_decl_name) { + try p.errStr(.auto_type_self_initialized, name_tok, name); + return error.ParsingFailed; + } if (p.syms.findSymbol(interned_name)) |sym| { try p.checkDeprecatedUnavailable(sym.ty, name_tok, sym.tok); if (sym.kind == .constexpr) { @@ -7636,6 +7861,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = sym.ty, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7653,6 +7879,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = if (sym.kind == .enumeration) .enumeration_ref else .decl_ref_expr, .ty = sym.ty, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7679,6 +7906,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .builtin_call_expr_one, .ty = some.ty, .data = .{ .decl = .{ .name = name_tok, .node = .none } }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7696,6 +7924,7 @@ fn primaryExpr(p: *Parser) Error!Result { .ty = ty, .tag = .fn_proto, .data = .{ .decl = .{ .name = name_tok } }, + .loc = @enumFromInt(name_tok), }); try p.decl_buf.append(node); @@ -7707,6 +7936,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = ty, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7714,11 +7944,12 @@ fn primaryExpr(p: *Parser) Error!Result { return error.ParsingFailed; }, .keyword_true, .keyword_false => |id| { + const tok_i = p.tok_i; p.tok_i += 1; const res = Result{ .val = Value.fromBool(id == .keyword_true), .ty = .{ .specifier = .bool }, - .node = try p.addNode(.{ .tag = .bool_literal, .ty = .{ .specifier = .bool }, .data = undefined }), + .node = try p.addNode(.{ .tag = .bool_literal, 
.ty = .{ .specifier = .bool }, .data = undefined, .loc = @enumFromInt(tok_i) }), }; std.debug.assert(!p.in_macro); // Should have been replaced with .one / .zero try p.value_map.put(res.node, res.val); @@ -7734,6 +7965,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .nullptr_literal, .ty = .{ .specifier = .nullptr_t }, .data = undefined, + .loc = @enumFromInt(p.tok_i), }), }; }, @@ -7770,6 +8002,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = ty, .data = .{ .decl_ref = tok }, + .loc = @enumFromInt(tok), }), }; }, @@ -7805,6 +8038,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = ty, .data = .{ .decl_ref = p.tok_i }, + .loc = @enumFromInt(p.tok_i), }), }; }, @@ -7824,16 +8058,16 @@ fn primaryExpr(p: *Parser) Error!Result { .unterminated_char_literal, => return p.charLiteral(), .zero => { - p.tok_i += 1; + defer p.tok_i += 1; var res: Result = .{ .val = Value.zero, .ty = if (p.in_macro) p.comp.types.intmax else Type.int }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; }, .one => { - p.tok_i += 1; + defer p.tok_i += 1; var res: Result = .{ .val = Value.one, .ty = if (p.in_macro) p.comp.types.intmax else Type.int }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; }, @@ -7841,7 +8075,7 @@ fn primaryExpr(p: *Parser) Error!Result { .embed_byte => { assert(!p.in_macro); const loc = p.pp.tokens.items(.loc)[p.tok_i]; - p.tok_i += 1; + defer p.tok_i += 1; const buf = p.comp.getSource(.generated).buf[loc.byte_offset..]; var byte: u8 = buf[0] - '0'; for (buf[1..]) |c| { @@ -7850,7 +8084,7 @@ fn primaryExpr(p: *Parser) Error!Result { byte += c - '0'; } var res: Result = .{ .val = try Value.int(byte, p.comp) }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); try p.value_map.put(res.node, res.val); return res; }, @@ -7869,17 +8103,19 @@ fn makePredefinedIdentifier(p: *Parser, strings_top: usize) !Result { const slice = p.strings.items[strings_top..]; const val = try Value.intern(p.comp, .{ .bytes = slice }); - const str_lit = try p.addNode(.{ .tag = .string_literal_expr, .ty = ty, .data = undefined }); + const str_lit = try p.addNode(.{ .tag = .string_literal_expr, .ty = ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); if (!p.in_macro) try p.value_map.put(str_lit, val); return Result{ .ty = ty, .node = try p.addNode(.{ .tag = .implicit_static_var, .ty = ty, .data = .{ .decl = .{ .name = p.tok_i, .node = str_lit } }, + .loc = @enumFromInt(p.tok_i), }) }; } fn stringLiteral(p: *Parser) Error!Result { + const string_start = p.tok_i; var string_end = p.tok_i; var string_kind: text_literal.Kind = .char; while (text_literal.Kind.classify(p.tok_ids[string_end], .string_literal)) |next| : (string_end += 1) { @@ -7894,13 +8130,17 @@ fn stringLiteral(p: *Parser) Error!Result { return error.ParsingFailed; } } - assert(string_end > p.tok_i); + const count = string_end - p.tok_i; + assert(count > 0); const char_width = string_kind.charUnitSize(p.comp); 
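
[Illustrative note, not part of the patch: the alignment introduced below matters because adjacent literals, possibly of different kinds, are concatenated into one scratch buffer that is then reinterpreted as 2- or 4-byte code units, e.g.:]

    #include <uchar.h>
    const char16_t *s = u"ab" "cd";  /* adjacent literals concatenate to u"abcd" */
    /* each code unit is 2 bytes, so the builder's byte buffer needs 2-byte alignment */
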
const strings_top = p.strings.items.len; defer p.strings.items.len = strings_top; + const literal_start = mem.alignForward(usize, strings_top, @intFromEnum(char_width)); + try p.strings.resize(literal_start); + while (p.tok_i < string_end) : (p.tok_i += 1) { const this_kind = text_literal.Kind.classify(p.tok_ids[p.tok_i], .string_literal).?; const slice = this_kind.contentSlice(p.tokSlice(p.tok_i)); @@ -7940,12 +8180,18 @@ fn stringLiteral(p: *Parser) Error!Result { }, } }, - .improperly_encoded => |bytes| p.strings.appendSliceAssumeCapacity(bytes), + .improperly_encoded => |bytes| { + if (count > 1) { + try p.errTok(.illegal_char_encoding_error, p.tok_i); + return error.ParsingFailed; + } + p.strings.appendSliceAssumeCapacity(bytes); + }, .utf8_text => |view| { switch (char_width) { .@"1" => p.strings.appendSliceAssumeCapacity(view.bytes), .@"2" => { - const capacity_slice: []align(@alignOf(u16)) u8 = @alignCast(p.strings.unusedCapacitySlice()); + const capacity_slice: []align(@alignOf(u16)) u8 = @alignCast(p.strings.allocatedSlice()[literal_start..]); const dest_len = std.mem.alignBackward(usize, capacity_slice.len, 2); const dest = std.mem.bytesAsSlice(u16, capacity_slice[0..dest_len]); const words_written = std.unicode.utf8ToUtf16Le(dest, view.bytes) catch unreachable; @@ -7966,7 +8212,7 @@ fn stringLiteral(p: *Parser) Error!Result { } } p.strings.appendNTimesAssumeCapacity(0, @intFromEnum(char_width)); - const slice = p.strings.items[strings_top..]; + const slice = p.strings.items[literal_start..]; // TODO this won't do anything if there is a cache hit const interned_align = mem.alignForward( @@ -7987,7 +8233,7 @@ fn stringLiteral(p: *Parser) Error!Result { }, .val = val, }; - res.node = try p.addNode(.{ .tag = .string_literal_expr, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .string_literal_expr, .ty = res.ty, .data = undefined, .loc = @enumFromInt(string_start) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } @@ -8004,7 +8250,7 @@ fn charLiteral(p: *Parser) Error!Result { return .{ .ty = Type.int, .val = Value.zero, - .node = try p.addNode(.{ .tag = .char_literal, .ty = Type.int, .data = undefined }), + .node = try p.addNode(.{ .tag = .char_literal, .ty = Type.int, .data = undefined, .loc = @enumFromInt(p.tok_i) }), }; }; if (char_kind == .utf_8) try p.err(.u8_char_lit); @@ -8013,7 +8259,7 @@ fn charLiteral(p: *Parser) Error!Result { const slice = char_kind.contentSlice(p.tokSlice(p.tok_i)); var is_multichar = false; - if (slice.len == 1 and std.ascii.isAscii(slice[0])) { + if (slice.len == 1 and std.ascii.isASCII(slice[0])) { // fast path: single unescaped ASCII char val = slice[0]; } else { @@ -8096,25 +8342,25 @@ fn charLiteral(p: *Parser) Error!Result { // > that of the single character or escape sequence is converted to type int. 
// This conversion only matters if `char` is signed and has a high-order bit of `1` if (char_kind == .char and !is_multichar and val > 0x7F and p.comp.getCharSignedness() == .signed) { - try value.intCast(.{ .specifier = .char }, p.comp); + _ = try value.intCast(.{ .specifier = .char }, p.comp); } const res = Result{ .ty = if (p.in_macro) macro_ty else ty, .val = value, - .node = try p.addNode(.{ .tag = .char_literal, .ty = ty, .data = undefined }), + .node = try p.addNode(.{ .tag = .char_literal, .ty = ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }), }; if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } -fn parseFloat(p: *Parser, buf: []const u8, suffix: NumberSuffix) !Result { +fn parseFloat(p: *Parser, buf: []const u8, suffix: NumberSuffix, tok_i: TokenIndex) !Result { const ty = Type{ .specifier = switch (suffix) { .None, .I => .double, .F, .IF => .float, - .F16 => .float16, + .F16, .IF16 => .float16, .L, .IL => .long_double, - .W, .IW => .float80, + .W, .IW => p.comp.float80Type().?.specifier, .Q, .IQ, .F128, .IF128 => .float128, else => unreachable, } }; @@ -8140,21 +8386,29 @@ fn parseFloat(p: *Parser, buf: []const u8, suffix: NumberSuffix) !Result { }); var res = Result{ .ty = ty, - .node = try p.addNode(.{ .tag = .float_literal, .ty = ty, .data = undefined }), + .node = try p.addNode(.{ .tag = .float_literal, .ty = ty, .data = undefined, .loc = @enumFromInt(tok_i) }), .val = val, }; if (suffix.isImaginary()) { try p.err(.gnu_imaginary_constant); res.ty = .{ .specifier = switch (suffix) { .I => .complex_double, + .IF16 => .complex_float16, .IF => .complex_float, .IL => .complex_long_double, - .IW => .complex_float80, + .IW => p.comp.float80Type().?.makeComplex().specifier, .IQ, .IF128 => .complex_float128, else => unreachable, } }; - res.val = .{}; // TODO add complex values - try res.un(p, .imaginary_literal); + res.val = try Value.intern(p.comp, switch (res.ty.bitSizeof(p.comp).?) 
{ + 32 => .{ .complex = .{ .cf16 = .{ 0.0, val.toFloat(f16, p.comp) } } }, + 64 => .{ .complex = .{ .cf32 = .{ 0.0, val.toFloat(f32, p.comp) } } }, + 128 => .{ .complex = .{ .cf64 = .{ 0.0, val.toFloat(f64, p.comp) } } }, + 160 => .{ .complex = .{ .cf80 = .{ 0.0, val.toFloat(f80, p.comp) } } }, + 256 => .{ .complex = .{ .cf128 = .{ 0.0, val.toFloat(f128, p.comp) } } }, + else => unreachable, + }); + try res.un(p, .imaginary_literal, tok_i); } return res; } @@ -8233,12 +8487,14 @@ fn fixedSizeInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok if (overflow) { try p.errTok(.int_literal_too_big, tok_i); res.ty = .{ .specifier = .ulong_long }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } + const interned_val = try Value.int(val, p.comp); if (suffix.isSignedInteger()) { - if (val > p.comp.types.intmax.maxInt(p.comp)) { + const max_int = try Value.maxInt(p.comp.types.intmax, p.comp); + if (interned_val.compare(.gt, max_int, p.comp)) { try p.errTok(.implicitly_unsigned_literal, tok_i); } } @@ -8266,13 +8522,23 @@ fn fixedSizeInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok for (specs) |spec| { res.ty = Type{ .specifier = spec }; if (res.ty.compareIntegerRanks(suffix_ty, p.comp).compare(.lt)) continue; - const max_int = res.ty.maxInt(p.comp); - if (val <= max_int) break; + const max_int = try Value.maxInt(res.ty, p.comp); + if (interned_val.compare(.lte, max_int, p.comp)) break; } else { - res.ty = .{ .specifier = .ulong_long }; + res.ty = .{ .specifier = spec: { + if (p.comp.langopts.emulate == .gcc) { + if (target_util.hasInt128(p.comp.target)) { + break :spec .int128; + } else { + break :spec .long_long; + } + } else { + break :spec .ulong_long; + } + } }; } - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } @@ -8291,7 +8557,7 @@ fn parseInt(p: *Parser, prefix: NumberPrefix, buf: []const u8, suffix: NumberSuf try p.errTok(.gnu_imaginary_constant, tok_i); res.ty = res.ty.makeComplex(); res.val = .{}; - try res.un(p, .imaginary_literal); + try res.un(p, .imaginary_literal, tok_i); } return res; } @@ -8326,17 +8592,6 @@ fn bitInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok_i: To // value of the constant is positive or was specified in hexadecimal or octal notation. 
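// Worked example (editor's illustration): `0x1FFwb` needs nine value bits
// plus one sign bit, so it gets type _BitInt(10); the unsigned `0x1FFuwb`
// needs only nine, giving _BitInt(9).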
const sign_bits = @intFromBool(suffix.isSignedInteger()); const bits_needed = count + sign_bits; - if (bits_needed > Compilation.bit_int_max_bits) { - const specifier: Type.Builder.Specifier = switch (suffix) { - .WB => .{ .bit_int = 0 }, - .UWB => .{ .ubit_int = 0 }, - .IWB => .{ .complex_bit_int = 0 }, - .IUWB => .{ .complex_ubit_int = 0 }, - else => unreachable, - }; - try p.errStr(.bit_int_too_big, tok_i, specifier.str(p.comp.langopts).?); - return error.ParsingFailed; - } break :blk @intCast(bits_needed); }; @@ -8347,7 +8602,7 @@ fn bitInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok_i: To .data = .{ .int = .{ .bits = bits_needed, .signedness = suffix.signedness() } }, }, }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } @@ -8420,6 +8675,10 @@ pub fn parseNumberToken(p: *Parser, tok_i: TokenIndex) !Result { } return error.ParsingFailed; }; + if (suffix.isFloat80() and p.comp.float80Type() == null) { + try p.errStr(.invalid_float_suffix, tok_i, suffix_str); + return error.ParsingFailed; + } if (is_float) { assert(prefix == .hex or prefix == .decimal); @@ -8428,7 +8687,7 @@ pub fn parseNumberToken(p: *Parser, tok_i: TokenIndex) !Result { return error.ParsingFailed; } const number = buf[0 .. buf.len - suffix_str.len]; - return p.parseFloat(number, suffix); + return p.parseFloat(number, suffix, tok_i); } else { return p.parseInt(prefix, int_part, suffix, tok_i); } @@ -8444,7 +8703,6 @@ fn ppNum(p: *Parser) Error!Result { } res.ty = if (res.ty.isUnsignedInt(p.comp)) p.comp.types.intmax.makeIntegerUnsigned() else p.comp.types.intmax; } else if (res.val.opt_ref != .none) { - // TODO add complex values try p.value_map.put(res.node, res.val); } return res; @@ -8465,6 +8723,7 @@ fn parseNoEval(p: *Parser, comptime func: fn (*Parser) Error!Result) Error!Resul /// : typeName ':' assignExpr /// | keyword_default ':' assignExpr fn genericSelection(p: *Parser) Error!Result { + const kw_generic = p.tok_i; p.tok_i += 1; const l_paren = try p.expectToken(.l_paren); const controlling_tok = p.tok_i; @@ -8508,17 +8767,23 @@ fn genericSelection(p: *Parser) Error!Result { try p.errStr(.generic_duplicate, start, try p.typeStr(ty)); try p.errStr(.generic_duplicate_here, chosen_tok, try p.typeStr(ty)); } - for (p.list_buf.items[list_buf_top + 1 ..], p.decl_buf.items[decl_buf_top..]) |item, prev_tok| { - const prev_ty = p.nodes.items(.ty)[@intFromEnum(item)]; - if (prev_ty.eql(ty, p.comp, true)) { - try p.errStr(.generic_duplicate, start, try p.typeStr(ty)); - try p.errStr(.generic_duplicate_here, @intFromEnum(prev_tok), try p.typeStr(ty)); + const list_buf = p.list_buf.items[list_buf_top + 1 ..]; + const decl_buf = p.decl_buf.items[decl_buf_top..]; + if (list_buf.len == decl_buf.len) { + // If these do not have the same length, there is already an error + for (list_buf, decl_buf) |item, prev_tok| { + const prev_ty = p.nodes.items(.ty)[@intFromEnum(item)]; + if (prev_ty.eql(ty, p.comp, true)) { + try p.errStr(.generic_duplicate, start, try p.typeStr(ty)); + try p.errStr(.generic_duplicate_here, @intFromEnum(prev_tok), try p.typeStr(ty)); + } } } try p.list_buf.append(try p.addNode(.{ .tag = .generic_association_expr, .ty = ty, .data = .{ .un = node.node }, + .loc = @enumFromInt(start), })); try p.decl_buf.append(@enumFromInt(start)); } else if 
(p.eatToken(.keyword_default)) |tok| { @@ -8542,10 +8807,12 @@ fn genericSelection(p: *Parser) Error!Result { try p.expectClosing(l_paren, .r_paren); if (chosen.node == .none) { - if (default_tok != null) { + if (default_tok) |tok| { try p.list_buf.insert(list_buf_top + 1, try p.addNode(.{ .tag = .generic_default_expr, .data = .{ .un = default.node }, + .ty = default.ty, + .loc = @enumFromInt(tok), })); chosen = default; } else { @@ -8556,11 +8823,15 @@ fn genericSelection(p: *Parser) Error!Result { try p.list_buf.insert(list_buf_top + 1, try p.addNode(.{ .tag = .generic_association_expr, .data = .{ .un = chosen.node }, + .ty = chosen.ty, + .loc = @enumFromInt(chosen_tok), })); - if (default_tok != null) { + if (default_tok) |tok| { try p.list_buf.append(try p.addNode(.{ .tag = .generic_default_expr, - .data = .{ .un = chosen.node }, + .data = .{ .un = default.node }, + .ty = default.ty, + .loc = @enumFromInt(tok), })); } } @@ -8568,7 +8839,8 @@ fn genericSelection(p: *Parser) Error!Result { var generic_node: Tree.Node = .{ .tag = .generic_expr_one, .ty = chosen.ty, - .data = .{ .bin = .{ .lhs = controlling.node, .rhs = chosen.node } }, + .data = .{ .two = .{ controlling.node, chosen.node } }, + .loc = @enumFromInt(kw_generic), }; const associations = p.list_buf.items[list_buf_top..]; if (associations.len > 2) { // associations[0] == controlling.node @@ -8578,3 +8850,42 @@ fn genericSelection(p: *Parser) Error!Result { chosen.node = try p.addNode(generic_node); return chosen; } + +test "Node locations" { + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); + defer comp.deinit(); + + const file = try comp.addSourceFromBuffer("file.c", + \\int foo = 5; + \\int bar = 10; + \\int main(void) {} + \\ + ); + + const builtin_macros = try comp.generateBuiltinMacros(.no_system_defines); + + var pp = Preprocessor.init(&comp); + defer pp.deinit(); + try pp.addBuiltinMacros(); + + _ = try pp.preprocess(builtin_macros); + + const eof = try pp.preprocess(file); + try pp.addToken(eof); + + var tree = try Parser.parse(&pp); + defer tree.deinit(); + + try std.testing.expectEqual(0, comp.diagnostics.list.items.len); + for (tree.root_decls, 0..) |node, i| { + const tok_i = tree.nodeTok(node).?; + const slice = tree.tokSlice(tok_i); + const expected = switch (i) { + 0 => "foo", + 1 => "bar", + 2 => "main", + else => unreachable, + }; + try std.testing.expectEqualStrings(expected, slice); + } +} diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index aa0a64c3e7..63bf085836 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -97,6 +97,11 @@ poisoned_identifiers: std.StringHashMap(void), /// Map from Source.Id to macro name in the `#ifndef` condition which guards the source, if any include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .{}, +/// Store `keyword_define` and `keyword_undef` tokens. +/// Used to implement preprocessor debug dump options +/// Must be false unless in -E mode (parser does not handle those token types) +store_macro_tokens: bool = false, + /// Memory is retained to avoid allocation on every single token. 
top_expansion_buf: ExpandBuf, @@ -622,9 +627,12 @@ fn preprocessExtra(pp: *Preprocessor, source: Source) MacroError!TokenWithExpans } if_level -= 1; }, - .keyword_define => try pp.define(&tokenizer), + .keyword_define => try pp.define(&tokenizer, directive), .keyword_undef => { const macro_name = (try pp.expectMacroName(&tokenizer)) orelse continue; + if (pp.store_macro_tokens) { + try pp.addToken(tokFromRaw(directive)); + } _ = pp.defines.remove(macro_name); try pp.expectNl(&tokenizer); @@ -975,7 +983,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool { .tok_i = @intCast(token_state.tokens_len), .arena = pp.arena.allocator(), .in_macro = true, - .strings = std.ArrayList(u8).init(pp.comp.gpa), + .strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa), .data = undefined, .value_map = undefined, @@ -1328,19 +1336,41 @@ fn stringify(pp: *Preprocessor, tokens: []const TokenWithExpansionLocs) !void { try pp.char_buf.append(c); } } - if (pp.char_buf.items[pp.char_buf.items.len - 1] == '\\') { + try pp.char_buf.ensureUnusedCapacity(2); + if (pp.char_buf.items[pp.char_buf.items.len - 1] != '\\') { + pp.char_buf.appendSliceAssumeCapacity("\"\n"); + return; + } + pp.char_buf.appendAssumeCapacity('"'); + var tokenizer: Tokenizer = .{ + .buf = pp.char_buf.items, + .index = 0, + .source = .generated, + .langopts = pp.comp.langopts, + .line = 0, + }; + const item = tokenizer.next(); + if (item.id == .unterminated_string_literal) { const tok = tokens[tokens.len - 1]; try pp.comp.addDiagnostic(.{ .tag = .invalid_pp_stringify_escape, .loc = tok.loc, }, tok.expansionSlice()); - pp.char_buf.items.len -= 1; + pp.char_buf.items.len -= 2; // erase unpaired backslash and appended end quote + pp.char_buf.appendAssumeCapacity('"'); } - try pp.char_buf.appendSlice("\"\n"); + pp.char_buf.appendAssumeCapacity('\n'); } fn reconstructIncludeString(pp: *Preprocessor, param_toks: []const TokenWithExpansionLocs, embed_args: ?*[]const TokenWithExpansionLocs, first: TokenWithExpansionLocs) !?[]const u8 { - assert(param_toks.len != 0); + if (param_toks.len == 0) { + try pp.comp.addDiagnostic(.{ + .tag = .expected_filename, + .loc = first.loc, + }, first.expansionSlice()); + return null; + } + const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; @@ -1539,11 +1569,13 @@ fn getPasteArgs(args: []const TokenWithExpansionLocs) []const TokenWithExpansion fn expandFuncMacro( pp: *Preprocessor, - loc: Source.Location, + macro_tok: TokenWithExpansionLocs, func_macro: *const Macro, args: *const MacroArguments, expanded_args: *const MacroArguments, + hideset_arg: Hideset.Index, ) MacroError!ExpandBuf { + var hideset = hideset_arg; var buf = ExpandBuf.init(pp.gpa); try buf.ensureTotalCapacity(func_macro.tokens.len); errdefer buf.deinit(); @@ -1594,16 +1626,21 @@ fn expandFuncMacro( }, else => &[1]TokenWithExpansionLocs{tokFromRaw(raw_next)}, }; - try pp.pasteTokens(&buf, next); if (next.len != 0) break; }, .macro_param_no_expand => { + if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { + hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc); + } const slice = getPasteArgs(args.items[raw.end]); const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, slice, &.{raw_loc}); }, .macro_param => { + if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { + hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc); + } const arg = 
expanded_args.items[raw.end]; const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, arg, &.{raw_loc}); @@ -1642,9 +1679,9 @@ fn expandFuncMacro( const arg = expanded_args.items[0]; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; - try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{}); + try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk false; - } else try pp.handleBuiltinMacro(raw.id, arg, loc); + } else try pp.handleBuiltinMacro(raw.id, arg, macro_tok.loc); const start = pp.comp.generated_buf.items.len; const w = pp.comp.generated_buf.writer(pp.gpa); try w.print("{}\n", .{@intFromBool(result)}); @@ -1655,7 +1692,7 @@ fn expandFuncMacro( const not_found = "0\n"; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; - try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{}); + try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk not_found; } else res: { var invalid: ?TokenWithExpansionLocs = null; @@ -1687,7 +1724,7 @@ fn expandFuncMacro( if (vendor_ident != null and attr_ident == null) { invalid = vendor_ident; } else if (attr_ident == null and invalid == null) { - invalid = .{ .id = .eof, .loc = loc }; + invalid = .{ .id = .eof, .loc = macro_tok.loc }; } if (invalid) |some| { try pp.comp.addDiagnostic( @@ -1731,7 +1768,7 @@ fn expandFuncMacro( const not_found = "0\n"; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; - try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{}); + try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk not_found; } else res: { var embed_args: []const TokenWithExpansionLocs = &.{}; @@ -1877,11 +1914,11 @@ fn expandFuncMacro( break; }, }; - if (string == null and invalid == null) invalid = .{ .loc = loc, .id = .eof }; + if (string == null and invalid == null) invalid = .{ .loc = macro_tok.loc, .id = .eof }; if (invalid) |some| try pp.comp.addDiagnostic( .{ .tag = .pragma_operator_string_literal, .loc = some.loc }, some.expansionSlice(), - ) else try pp.pragmaOperator(string.?, loc); + ) else try pp.pragmaOperator(string.?, macro_tok.loc); }, .comma => { if (tok_i + 2 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { @@ -1930,6 +1967,15 @@ fn expandFuncMacro( } removePlacemarkers(&buf); + const macro_expansion_locs = macro_tok.expansionSlice(); + for (buf.items) |*tok| { + try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc}); + try tok.addExpansionLocation(pp.gpa, macro_expansion_locs); + const tok_hidelist = pp.hideset.get(tok.loc); + const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hideset); + try pp.hideset.put(tok.loc, new_hidelist); + } + return buf; } @@ -2207,8 +2253,10 @@ fn expandMacroExhaustive( else => |e| return e, }; assert(r_paren.id == .r_paren); + var free_arg_expansion_locs = false; defer { for (args.items) |item| { + if (free_arg_expansion_locs) for (item) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); pp.gpa.free(item); } args.deinit(); @@ -2234,6 +2282,7 @@ fn expandMacroExhaustive( .arguments = .{ 
.expected = @intCast(macro.params.len), .actual = args_count }, }; if (macro.var_args and args_count < macro.params.len) { + free_arg_expansion_locs = true; try pp.comp.addDiagnostic( .{ .tag = .expected_at_least_arguments, .loc = buf.items[idx].loc, .extra = extra }, buf.items[idx].expansionSlice(), @@ -2243,6 +2292,7 @@ fn expandMacroExhaustive( continue; } if (!macro.var_args and args_count != macro.params.len) { + free_arg_expansion_locs = true; try pp.comp.addDiagnostic( .{ .tag = .expected_arguments, .loc = buf.items[idx].loc, .extra = extra }, buf.items[idx].expansionSlice(), @@ -2264,19 +2314,9 @@ fn expandMacroExhaustive( expanded_args.appendAssumeCapacity(try expand_buf.toOwnedSlice()); } - var res = try pp.expandFuncMacro(macro_tok.loc, macro, &args, &expanded_args); + var res = try pp.expandFuncMacro(macro_tok, macro, &args, &expanded_args, hs); defer res.deinit(); const tokens_added = res.items.len; - - const macro_expansion_locs = macro_tok.expansionSlice(); - for (res.items) |*tok| { - try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc}); - try tok.addExpansionLocation(pp.gpa, macro_expansion_locs); - const tok_hidelist = pp.hideset.get(tok.loc); - const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hs); - try pp.hideset.put(tok.loc, new_hidelist); - } - const tokens_removed = macro_scan_idx - idx + 1; for (buf.items[idx .. idx + tokens_removed]) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); try buf.replaceRange(idx, tokens_removed, res.items); @@ -2476,7 +2516,7 @@ fn makeGeneratedToken(pp: *Preprocessor, start: usize, id: Token.Id, source: Tok } /// Defines a new macro and warns if it is a duplicate -fn defineMacro(pp: *Preprocessor, name_tok: RawToken, macro: Macro) Error!void { +fn defineMacro(pp: *Preprocessor, define_tok: RawToken, name_tok: RawToken, macro: Macro) Error!void { const name_str = pp.tokSlice(name_tok); const gop = try pp.defines.getOrPut(pp.gpa, name_str); if (gop.found_existing and !gop.value_ptr.eql(macro, pp)) { @@ -2497,11 +2537,14 @@ fn defineMacro(pp: *Preprocessor, name_tok: RawToken, macro: Macro) Error!void { if (pp.verbose) { pp.verboseLog(name_tok, "macro {s} defined", .{name_str}); } + if (pp.store_macro_tokens) { + try pp.addToken(tokFromRaw(define_tok)); + } gop.value_ptr.* = macro; } /// Handle a #define directive. -fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { +fn define(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken) Error!void { // Get macro name and validate it. const macro_name = tokenizer.nextNoWS(); if (macro_name.id == .keyword_defined) { @@ -2524,7 +2567,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { // Check for function macros and empty defines. 
var first = tokenizer.next(); switch (first.id) { - .nl, .eof => return pp.defineMacro(macro_name, .{ + .nl, .eof => return pp.defineMacro(define_tok, macro_name, .{ .params = &.{}, .tokens = &.{}, .var_args = false, @@ -2532,7 +2575,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { .is_func = false, }), .whitespace => first = tokenizer.next(), - .l_paren => return pp.defineFn(tokenizer, macro_name, first), + .l_paren => return pp.defineFn(tokenizer, define_tok, macro_name, first), else => try pp.err(first, .whitespace_after_macro_name), } if (first.id == .hash_hash) { @@ -2591,7 +2634,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { } const list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items); - try pp.defineMacro(macro_name, .{ + try pp.defineMacro(define_tok, macro_name, .{ .loc = tokFromRaw(macro_name).loc, .tokens = list, .params = undefined, @@ -2601,7 +2644,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { } /// Handle a function like #define directive. -fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, macro_name: RawToken, l_paren: RawToken) Error!void { +fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken, macro_name: RawToken, l_paren: RawToken) Error!void { assert(macro_name.id.isMacroIdentifier()); var params = std.ArrayList([]const u8).init(pp.gpa); defer params.deinit(); @@ -2778,7 +2821,7 @@ fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, macro_name: RawToken, l_pa const param_list = try pp.arena.allocator().dupe([]const u8, params.items); const token_list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items); - try pp.defineMacro(macro_name, .{ + try pp.defineMacro(define_tok, macro_name, .{ .is_func = true, .params = param_list, .var_args = var_args or gnu_var_args.len != 0, @@ -3241,8 +3284,78 @@ fn printLinemarker( // After how many empty lines are needed to replace them with linemarkers. const collapse_newlines = 8; +pub const DumpMode = enum { + /// Standard preprocessor output; no macros + result_only, + /// Output only #define directives for all the macros defined during the execution of the preprocessor + /// Only macros which are still defined at the end of preprocessing are printed. + /// Only the most recent definition is printed + /// Defines are printed in arbitrary order + macros_only, + /// Standard preprocessor output; but additionally output #define's and #undef's for macros as they are encountered + macros_and_result, + /// Same as macros_and_result, except only the macro name is printed for #define's + macro_names_and_result, +}; + +/// Pretty-print the macro define or undef at location `loc`. +/// We re-tokenize the directive because we are printing a macro that may have the same name as one in +/// `pp.defines` but a different definition (due to being #undef'ed and then redefined) +fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts: enum { name_only, name_and_body }) !void { + const source = pp.comp.getSource(loc.id); + var tokenizer: Tokenizer = .{ + .buf = source.buf, + .langopts = pp.comp.langopts, + .source = source.id, + .index = loc.byte_offset, + }; + var prev_ws = false; // avoid printing multiple whitespace if /* */ comments are within the macro def + var saw_name = false; // do not print comments before the name token is seen. 
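// Editor's note: the loop below re-lexes the directive from its source
// location and echoes each token, collapsing runs of whitespace into a
// single space.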
+ while (true) { + const tok = tokenizer.next(); + switch (tok.id) { + .comment => { + if (saw_name) { + prev_ws = false; + try w.print("{s}", .{pp.tokSlice(tok)}); + } + }, + .nl, .eof => break, + .whitespace => { + if (!prev_ws) { + try w.writeByte(' '); + prev_ws = true; + } + }, + else => { + prev_ws = false; + try w.print("{s}", .{pp.tokSlice(tok)}); + }, + } + if (tok.id == .identifier or tok.id == .extended_identifier) { + if (parts == .name_only) break; + saw_name = true; + } + } +} + +fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void { + var it = pp.defines.valueIterator(); + while (it.next()) |macro| { + if (macro.is_builtin) continue; + + try w.writeAll("#define "); + try pp.prettyPrintMacro(w, macro.loc, .name_and_body); + try w.writeByte('\n'); + } +} + /// Pretty print tokens and try to preserve whitespace. -pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype) !void { +pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype, macro_dump_mode: DumpMode) !void { + if (macro_dump_mode == .macros_only) { + return pp.prettyPrintMacrosOnly(w); + } + const tok_ids = pp.tokens.items(.id); var i: u32 = 0; @@ -3334,6 +3447,17 @@ pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype) !void { try pp.printLinemarker(w, line_col.line_no, source, .@"resume"); last_nl = true; }, + .keyword_define, .keyword_undef => { + switch (macro_dump_mode) { + .macros_and_result, .macro_names_and_result => { + try w.writeByte('#'); + try pp.prettyPrintMacro(w, cur.loc, if (macro_dump_mode == .macros_and_result) .name_and_body else .name_only); + last_nl = false; + }, + .result_only => unreachable, // `pp.store_macro_tokens` should be false for standard preprocessor output + .macros_only => unreachable, // handled by prettyPrintMacrosOnly + } + }, else => { const slice = pp.expandedSlice(cur); try w.writeAll(slice); @@ -3350,7 +3474,7 @@ test "Preserve pragma tokens sometimes" { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); try comp.addDefaultPragmaHandlers(); @@ -3364,7 +3488,7 @@ test "Preserve pragma tokens sometimes" { const test_runner_macros = try comp.addSourceFromBuffer("<test_runner>", source_text); const eof = try pp.preprocess(test_runner_macros); try pp.addToken(eof); - try pp.prettyPrintTokens(buf.writer()); + try pp.prettyPrintTokens(buf.writer(), .result_only); return allocator.dupe(u8, buf.items); } @@ -3410,7 +3534,7 @@ test "destringify" { try std.testing.expectEqualStrings(destringified, pp.char_buf.items); } }; - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp); defer pp.deinit(); @@ -3468,7 +3592,7 @@ test "Include guards" { } fn testIncludeGuard(allocator: std.mem.Allocator, comptime template: []const u8, tok_id: RawToken.Id, expected_guards: u32) !void { - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp); defer pp.deinit(); diff --git a/lib/compiler/aro/aro/Source.zig b/lib/compiler/aro/aro/Source.zig index 06e58ecb16..20788af21c 100644 --- a/lib/compiler/aro/aro/Source.zig +++ b/lib/compiler/aro/aro/Source.zig @@ -75,7 +75,17 @@ pub fn lineCol(source: Source, loc: Location) LineCol { i += 1; continue; }; - const cp = std.unicode.utf8Decode(source.buf[i..][0..len]) catch { + const slice = source.buf[i..]; + if (len > slice.len) { + 
break; + } + const cp = switch (len) { + 1 => slice[0], + 2 => std.unicode.utf8Decode2(slice[0..2].*), + 3 => std.unicode.utf8Decode3(slice[0..3].*), + 4 => std.unicode.utf8Decode4(slice[0..4].*), + else => unreachable, + } catch { i += 1; continue; }; diff --git a/lib/compiler/aro/aro/SymbolStack.zig b/lib/compiler/aro/aro/SymbolStack.zig index dba7223447..be2ee20cb0 100644 --- a/lib/compiler/aro/aro/SymbolStack.zig +++ b/lib/compiler/aro/aro/SymbolStack.zig @@ -178,9 +178,11 @@ pub fn defineTypedef( if (s.get(name, .vars)) |prev| { switch (prev.kind) { .typedef => { - if (!ty.eql(prev.ty, p.comp, true)) { - try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty)); - if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok); + if (!prev.ty.is(.invalid)) { + if (!ty.eql(prev.ty, p.comp, true)) { + try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty)); + if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok); + } } }, .enumeration, .decl, .def, .constexpr => { @@ -194,7 +196,12 @@ pub fn defineTypedef( .kind = .typedef, .name = name, .tok = tok, - .ty = ty, + .ty = .{ + .name = name, + .specifier = ty.specifier, + .qual = ty.qual, + .data = ty.data, + }, .node = node, .val = .{}, }); diff --git a/lib/compiler/aro/aro/Tokenizer.zig b/lib/compiler/aro/aro/Tokenizer.zig index c5a84b8cc0..f703940fd8 100644 --- a/lib/compiler/aro/aro/Tokenizer.zig +++ b/lib/compiler/aro/aro/Tokenizer.zig @@ -178,6 +178,8 @@ pub const Token = struct { keyword_return, keyword_short, keyword_signed, + keyword_signed1, + keyword_signed2, keyword_sizeof, keyword_static, keyword_struct, @@ -258,7 +260,6 @@ pub const Token = struct { keyword_asm, keyword_asm1, keyword_asm2, - keyword_float80, /// _Float128 keyword_float128_1, /// __float128 @@ -369,6 +370,8 @@ pub const Token = struct { .keyword_return, .keyword_short, .keyword_signed, + .keyword_signed1, + .keyword_signed2, .keyword_sizeof, .keyword_static, .keyword_struct, @@ -417,7 +420,6 @@ pub const Token = struct { .keyword_asm, .keyword_asm1, .keyword_asm2, - .keyword_float80, .keyword_float128_1, .keyword_float128_2, .keyword_int128, @@ -627,6 +629,8 @@ pub const Token = struct { .keyword_return => "return", .keyword_short => "short", .keyword_signed => "signed", + .keyword_signed1 => "__signed", + .keyword_signed2 => "__signed__", .keyword_sizeof => "sizeof", .keyword_static => "static", .keyword_struct => "struct", @@ -702,7 +706,6 @@ pub const Token = struct { .keyword_asm => "asm", .keyword_asm1 => "__asm", .keyword_asm2 => "__asm__", - .keyword_float80 => "__float80", .keyword_float128_1 => "_Float128", .keyword_float128_2 => "__float128", .keyword_int128 => "__int128", @@ -732,7 +735,8 @@ pub const Token = struct { pub fn symbol(id: Id) []const u8 { return switch (id) { - .macro_string, .invalid => unreachable, + .macro_string => unreachable, + .invalid => "invalid bytes", .identifier, .extended_identifier, .macro_func, @@ -873,10 +877,7 @@ pub const Token = struct { } const all_kws = std.StaticStringMap(Id).initComptime(.{ - .{ "auto", auto: { - @setEvalBranchQuota(3000); - break :auto .keyword_auto; - } }, + .{ "auto", .keyword_auto }, .{ "break", .keyword_break }, .{ "case", .keyword_case }, .{ "char", .keyword_char }, @@ -898,6 +899,8 @@ pub const Token = struct { .{ "return", .keyword_return }, .{ "short", .keyword_short }, .{ "signed", .keyword_signed }, + .{ "__signed", .keyword_signed1 }, + .{ "__signed__", .keyword_signed2 }, .{ "sizeof", .keyword_sizeof }, .{ 
"static", .keyword_static }, .{ "struct", .keyword_struct }, @@ -982,7 +985,6 @@ pub const Token = struct { .{ "asm", .keyword_asm }, .{ "__asm", .keyword_asm1 }, .{ "__asm__", .keyword_asm2 }, - .{ "__float80", .keyword_float80 }, .{ "_Float128", .keyword_float128_1 }, .{ "__float128", .keyword_float128_2 }, .{ "__int128", .keyword_int128 }, @@ -1300,11 +1302,17 @@ pub fn next(self: *Tokenizer) Token { else => {}, }, .char_escape_sequence => switch (c) { - '\r', '\n' => unreachable, // removed by line splicing + '\r', '\n' => { + id = .unterminated_char_literal; + break; + }, else => state = .char_literal, }, .string_escape_sequence => switch (c) { - '\r', '\n' => unreachable, // removed by line splicing + '\r', '\n' => { + id = .unterminated_string_literal; + break; + }, else => state = .string_literal, }, .identifier, .extended_identifier => switch (c) { @@ -1792,7 +1800,7 @@ pub fn nextNoWSComments(self: *Tokenizer) Token { /// Try to tokenize a '::' even if not supported by the current language standard. pub fn colonColon(self: *Tokenizer) Token { var tok = self.nextNoWS(); - if (tok.id == .colon and self.buf[self.index] == ':') { + if (tok.id == .colon and self.index < self.buf.len and self.buf[self.index] == ':') { self.index += 1; tok.id = .colon_colon; } @@ -2142,8 +2150,30 @@ test "C23 keywords" { }, .c23); } +test "Tokenizer fuzz test" { + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); + defer comp.deinit(); + + const input_bytes = std.testing.fuzzInput(.{}); + if (input_bytes.len == 0) return; + + const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); + + var tokenizer: Tokenizer = .{ + .buf = source.buf, + .source = source.id, + .langopts = comp.langopts, + }; + while (true) { + const prev_index = tokenizer.index; + const tok = tokenizer.next(); + if (tok.id == .eof) break; + try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress + } +} + fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); if (standard) |provided| { comp.langopts.standard = provided; diff --git a/lib/compiler/aro/aro/Tree.zig b/lib/compiler/aro/aro/Tree.zig index e353beaebc..a1b15bd669 100644 --- a/lib/compiler/aro/aro/Tree.zig +++ b/lib/compiler/aro/aro/Tree.zig @@ -137,15 +137,22 @@ pub const Node = struct { tag: Tag, ty: Type = .{ .specifier = .void }, data: Data, + loc: Loc = .none, pub const Range = struct { start: u32, end: u32 }; + pub const Loc = enum(u32) { + none = std.math.maxInt(u32), + _, + }; + pub const Data = union { decl: struct { name: TokenIndex, node: NodeIndex = .none, }, decl_ref: TokenIndex, + two: [2]NodeIndex, range: Range, if3: struct { cond: NodeIndex, @@ -277,7 +284,8 @@ pub const Tag = enum(u8) { // ====== Decl ====== - // _Static_assert + /// _Static_assert + /// loc is token index of _Static_assert static_assert, // function prototype @@ -303,17 +311,18 @@ pub const Tag = enum(u8) { threadlocal_static_var, /// __asm__("...") at file scope + /// loc is token index of __asm__ keyword file_scope_asm, // typedef declaration typedef, // container declarations - /// { lhs; rhs; } + /// { two[0]; two[1]; } struct_decl_two, - /// { lhs; rhs; } + /// { two[0]; two[1]; } union_decl_two, - /// { lhs, rhs, } + /// { two[0], two[1], } enum_decl_two, /// { range } struct_decl, @@ -339,7 +348,7 @@ pub const Tag 
= enum(u8) { // ====== Stmt ====== labeled_stmt, - /// { first; second; } first and second may be null + /// { two[0]; two[1]; } two[0] and two[1] may be null compound_stmt_two, /// { data } compound_stmt, @@ -476,7 +485,7 @@ pub const Tag = enum(u8) { real_expr, /// lhs[rhs] lhs is pointer/array type, rhs is integer type array_access_expr, - /// first(second) second may be 0 + /// two[0](two[1]) two[1] may be 0 call_expr_one, /// data[0](data[1..]) call_expr, @@ -515,7 +524,7 @@ pub const Tag = enum(u8) { sizeof_expr, /// _Alignof(un?) alignof_expr, - /// _Generic(controlling lhs, chosen rhs) + /// _Generic(controlling two[0], chosen two[1]) generic_expr_one, /// _Generic(controlling range[0], chosen range[1], rest range[2..]) generic_expr, @@ -534,28 +543,34 @@ pub const Tag = enum(u8) { // ====== Initializer expressions ====== - /// { lhs, rhs } + /// { two[0], two[1] } array_init_expr_two, /// { range } array_init_expr, - /// { lhs, rhs } + /// { two[0], two[1] } struct_init_expr_two, /// { range } struct_init_expr, /// { union_init } union_init_expr, + /// (ty){ un } + /// loc is token index of l_paren compound_literal_expr, /// (static ty){ un } + /// loc is token index of l_paren static_compound_literal_expr, /// (thread_local ty){ un } + /// loc is token index of l_paren thread_local_compound_literal_expr, /// (static thread_local ty){ un } + /// loc is token index of l_paren static_thread_local_compound_literal_expr, /// Inserted at the end of a function body if no return stmt is found. /// ty is the function's return type /// data is return_zero which is true if the function is called "main" and ty is compatible with int + /// loc is token index of closing r_brace of function implicit_return, /// Inserted in array_init_expr to represent unspecified elements.
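The `two: [2]NodeIndex` payload above replaces the old lhs/rhs pair for the *_two tags, and the childNodes helper added in the next hunk provides uniform access to both the inline and the range-based payloads. A minimal usage sketch (editor's illustration; `tree` and `call_node` are assumed to be an already-parsed Tree and one of its call_expr/call_expr_one nodes):

    const children = tree.childNodes(call_node);
    const callee = children[0]; // the function pointer being called
    const args = children[1..]; // the argument expressions, if any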
@@ -608,6 +623,57 @@ pub fn bitfieldWidth(tree: *const Tree, node: NodeIndex, inspect_lval: bool) ?u3 } } +const CallableResultUsage = struct { + /// name token of the thing being called, for diagnostics + tok: TokenIndex, + /// true if `nodiscard` attribute present + nodiscard: bool, + /// true if `warn_unused_result` attribute present + warn_unused_result: bool, +}; + +pub fn callableResultUsage(tree: *const Tree, node: NodeIndex) ?CallableResultUsage { + const data = tree.nodes.items(.data); + + var cur_node = node; + while (true) switch (tree.nodes.items(.tag)[@intFromEnum(cur_node)]) { + .decl_ref_expr => { + const tok = data[@intFromEnum(cur_node)].decl_ref; + const fn_ty = tree.nodes.items(.ty)[@intFromEnum(node)].elemType(); + return .{ + .tok = tok, + .nodiscard = fn_ty.hasAttribute(.nodiscard), + .warn_unused_result = fn_ty.hasAttribute(.warn_unused_result), + }; + }, + .paren_expr => cur_node = data[@intFromEnum(cur_node)].un, + .comma_expr => cur_node = data[@intFromEnum(cur_node)].bin.rhs, + + .explicit_cast, .implicit_cast => cur_node = data[@intFromEnum(cur_node)].cast.operand, + .addr_of_expr, .deref_expr => cur_node = data[@intFromEnum(cur_node)].un, + .call_expr_one => cur_node = data[@intFromEnum(cur_node)].two[0], + .call_expr => cur_node = tree.data[data[@intFromEnum(cur_node)].range.start], + .member_access_expr, .member_access_ptr_expr => { + const member = data[@intFromEnum(cur_node)].member; + var ty = tree.nodes.items(.ty)[@intFromEnum(member.lhs)]; + if (ty.isPtr()) ty = ty.elemType(); + const record = ty.getRecord().?; + const field = record.fields[member.index]; + const attributes = if (record.field_attributes) |attrs| attrs[member.index] else &.{}; + return .{ + .tok = field.name_tok, + .nodiscard = for (attributes) |attr| { + if (attr.tag == .nodiscard) break true; + } else false, + .warn_unused_result = for (attributes) |attr| { + if (attr.tag == .warn_unused_result) break true; + } else false, + }; + }, + else => return null, + }; +} + pub fn isLval(tree: *const Tree, node: NodeIndex) bool { var is_const: bool = undefined; return tree.isLvalExtra(node, &is_const); @@ -672,17 +738,66 @@ pub fn isLvalExtra(tree: *const Tree, node: NodeIndex, is_const: *bool) bool { } } +/// This should only be used for node tags that represent AST nodes which have an arbitrary number of children +/// In particular, it should *not* be used for nodes with .un or .bin data types +/// +/// For call expressions, child_nodes[0] is the function pointer being called and child_nodes[1..] +/// are the arguments +/// +/// For generic selection expressions, child_nodes[0] is the controlling expression, +/// child_nodes[1] is the chosen expression (it is a syntax error for there to be no chosen expression), +/// and child_nodes[2..] are the remaining expressions.
+pub fn childNodes(tree: *const Tree, node: NodeIndex) []const NodeIndex { + const tags = tree.nodes.items(.tag); + const data = tree.nodes.items(.data); + switch (tags[@intFromEnum(node)]) { + .compound_stmt_two, + .array_init_expr_two, + .struct_init_expr_two, + .enum_decl_two, + .struct_decl_two, + .union_decl_two, + .call_expr_one, + .generic_expr_one, + => { + const index: u32 = @intFromEnum(node); + const end = std.mem.indexOfScalar(NodeIndex, &data[index].two, .none) orelse 2; + return data[index].two[0..end]; + }, + .compound_stmt, + .array_init_expr, + .struct_init_expr, + .enum_decl, + .struct_decl, + .union_decl, + .call_expr, + .generic_expr, + => { + const range = data[@intFromEnum(node)].range; + return tree.data[range.start..range.end]; + }, + else => unreachable, + } +} + pub fn tokSlice(tree: *const Tree, tok_i: TokenIndex) []const u8 { if (tree.tokens.items(.id)[tok_i].lexeme()) |some| return some; const loc = tree.tokens.items(.loc)[tok_i]; - var tmp_tokenizer = Tokenizer{ - .buf = tree.comp.getSource(loc.id).buf, - .langopts = tree.comp.langopts, - .index = loc.byte_offset, - .source = .generated, + return tree.comp.locSlice(loc); +} + +pub fn nodeTok(tree: *const Tree, node: NodeIndex) ?TokenIndex { + std.debug.assert(node != .none); + const loc = tree.nodes.items(.loc)[@intFromEnum(node)]; + return switch (loc) { + .none => null, + else => |tok_i| @intFromEnum(tok_i), }; - const tok = tmp_tokenizer.next(); - return tmp_tokenizer.buf[tok.start..tok.end]; +} + +pub fn nodeLoc(tree: *const Tree, node: NodeIndex) ?Source.Location { + const tok_i = tree.nodeTok(node) orelse return null; + return tree.tokens.items(.loc)[@intFromEnum(tok_i)]; } pub fn dump(tree: *const Tree, config: std.io.tty.Config, writer: anytype) !void { @@ -766,6 +881,10 @@ fn dumpNode( } try config.setColor(w, TYPE); try w.writeByte('\''); + const name = ty.getName(); + if (name != .empty) { + try w.print("{s}': '", .{mapper.lookup(name)}); + } try ty.dump(mapper, tree.comp.langopts, w); try w.writeByte('\''); @@ -794,7 +913,9 @@ fn dumpNode( if (ty.specifier == .attributed) { try config.setColor(w, ATTRIBUTE); - for (ty.data.attributed.attributes) |attr| { + var it = Attribute.Iterator.initType(ty); + while (it.next()) |item| { + const attr, _ = item; try w.writeByteNTimes(' ', level + half); try w.print("attr: {s}", .{@tagName(attr.tag)}); try tree.dumpAttribute(attr, w); @@ -900,9 +1021,16 @@ fn dumpNode( .enum_decl, .struct_decl, .union_decl, + .compound_stmt_two, + .array_init_expr_two, + .struct_init_expr_two, + .enum_decl_two, + .struct_decl_two, + .union_decl_two, => { + const child_nodes = tree.childNodes(node); const maybe_field_attributes = if (ty.getRecord()) |record| record.field_attributes else null; - for (tree.data[data.range.start..data.range.end], 0..) |stmt, i| { + for (child_nodes, 0..) 
|stmt, i| { if (i != 0) try w.writeByte('\n'); try tree.dumpNode(stmt, level + delta, mapper, config, w); if (maybe_field_attributes) |field_attributes| { @@ -914,33 +1042,6 @@ fn dumpNode( } } }, - .compound_stmt_two, - .array_init_expr_two, - .struct_init_expr_two, - .enum_decl_two, - .struct_decl_two, - .union_decl_two, - => { - var attr_array = [2][]const Attribute{ &.{}, &.{} }; - const empty: [][]const Attribute = &attr_array; - const field_attributes = if (ty.getRecord()) |record| (record.field_attributes orelse empty.ptr) else empty.ptr; - if (data.bin.lhs != .none) { - try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); - if (field_attributes[0].len > 0) { - try config.setColor(w, ATTRIBUTE); - try tree.dumpFieldAttributes(field_attributes[0], level + delta + half, w); - try config.setColor(w, .reset); - } - } - if (data.bin.rhs != .none) { - try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); - if (field_attributes[1].len > 0) { - try config.setColor(w, ATTRIBUTE); - try tree.dumpFieldAttributes(field_attributes[1], level + delta + half, w); - try config.setColor(w, .reset); - } - } - }, .union_init_expr => { try w.writeByteNTimes(' ', level + half); try w.writeAll("field index: "); @@ -1130,23 +1231,21 @@ fn dumpNode( try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, - .call_expr => { - try w.writeByteNTimes(' ', level + half); - try w.writeAll("lhs:\n"); - try tree.dumpNode(tree.data[data.range.start], level + delta, mapper, config, w); + .call_expr, .call_expr_one => { + const child_nodes = tree.childNodes(node); + const fn_ptr = child_nodes[0]; + const args = child_nodes[1..]; try w.writeByteNTimes(' ', level + half); - try w.writeAll("args:\n"); - for (tree.data[data.range.start + 1 .. data.range.end]) |arg| try tree.dumpNode(arg, level + delta, mapper, config, w); - }, - .call_expr_one => { - try w.writeByteNTimes(' ', level + half); try w.writeAll("lhs:\n"); - try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); - if (data.bin.rhs != .none) { + try tree.dumpNode(fn_ptr, level + delta, mapper, config, w); + + if (args.len > 0) { try w.writeByteNTimes(' ', level + half); - try w.writeAll("arg:\n"); - try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); + try w.writeAll("args:\n"); + for (args) |arg| { + try tree.dumpNode(arg, level + delta, mapper, config, w); + } } }, .builtin_call_expr => { @@ -1295,28 +1394,25 @@ fn dumpNode( try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, - .generic_expr_one => { - try w.writeByteNTimes(' ', level + 1); - try w.writeAll("controlling:\n"); - try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); - try w.writeByteNTimes(' ', level + 1); - if (data.bin.rhs != .none) { - try w.writeAll("chosen:\n"); - try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); - } - }, - .generic_expr => { - const nodes = tree.data[data.range.start..data.range.end]; + .generic_expr, .generic_expr_one => { + const child_nodes = tree.childNodes(node); + const controlling = child_nodes[0]; + const chosen = child_nodes[1]; + const rest = child_nodes[2..]; + try w.writeByteNTimes(' ', level + 1); try w.writeAll("controlling:\n"); - try tree.dumpNode(nodes[0], level + delta, mapper, config, w); + try tree.dumpNode(controlling, level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + 1); try w.writeAll("chosen:\n"); - try tree.dumpNode(nodes[1], level + delta, mapper, config, w); - try w.writeByteNTimes(' ', level + 1); - try 
w.writeAll("rest:\n"); - for (nodes[2..]) |expr| { - try tree.dumpNode(expr, level + delta, mapper, config, w); + try tree.dumpNode(chosen, level + delta, mapper, config, w); + + if (rest.len > 0) { + try w.writeByteNTimes(' ', level + 1); + try w.writeAll("rest:\n"); + for (rest) |expr| { + try tree.dumpNode(expr, level + delta, mapper, config, w); + } } }, .generic_association_expr, .generic_default_expr, .stmt_expr, .imaginary_literal => { diff --git a/lib/compiler/aro/aro/Tree/number_affixes.zig b/lib/compiler/aro/aro/Tree/number_affixes.zig index 7f01e9f2e7..38ef6b8a56 100644 --- a/lib/compiler/aro/aro/Tree/number_affixes.zig +++ b/lib/compiler/aro/aro/Tree/number_affixes.zig @@ -74,8 +74,8 @@ pub const Suffix = enum { // float and imaginary float F, IF, - // _Float16 - F16, + // _Float16 and imaginary _Float16 + F16, IF16, // __float80 W, @@ -129,6 +129,7 @@ pub const Suffix = enum { .{ .I, &.{"I"} }, .{ .IL, &.{ "I", "L" } }, + .{ .IF16, &.{ "I", "F16" } }, .{ .IF, &.{ "I", "F" } }, .{ .IW, &.{ "I", "W" } }, .{ .IF128, &.{ "I", "F128" } }, @@ -161,7 +162,7 @@ pub const Suffix = enum { pub fn isImaginary(suffix: Suffix) bool { return switch (suffix) { - .I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW => true, + .I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW, .IF16 => true, .None, .L, .F16, .F, .U, .UL, .LL, .ULL, .WB, .UWB, .F128, .Q, .W => false, }; } @@ -170,7 +171,7 @@ pub const Suffix = enum { return switch (suffix) { .None, .L, .LL, .I, .IL, .ILL, .WB, .IWB => true, .U, .UL, .ULL, .IU, .IUL, .IULL, .UWB, .IUWB => false, - .F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW => unreachable, + .F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW, .IF16 => unreachable, }; } @@ -184,4 +185,8 @@ pub const Suffix = enum { else => false, }; } + + pub fn isFloat80(suffix: Suffix) bool { + return suffix == .W or suffix == .IW; + } }; diff --git a/lib/compiler/aro/aro/Type.zig b/lib/compiler/aro/aro/Type.zig index 13fa8ce2e2..8ab2d3164a 100644 --- a/lib/compiler/aro/aro/Type.zig +++ b/lib/compiler/aro/aro/Type.zig @@ -146,17 +146,14 @@ pub const Attributed = struct { attributes: []Attribute, base: Type, - pub fn create(allocator: std.mem.Allocator, base: Type, existing_attributes: []const Attribute, attributes: []const Attribute) !*Attributed { + pub fn create(allocator: std.mem.Allocator, base_ty: Type, attributes: []const Attribute) !*Attributed { const attributed_type = try allocator.create(Attributed); errdefer allocator.destroy(attributed_type); - - const all_attrs = try allocator.alloc(Attribute, existing_attributes.len + attributes.len); - @memcpy(all_attrs[0..existing_attributes.len], existing_attributes); - @memcpy(all_attrs[existing_attributes.len..], attributes); + const duped = try allocator.dupe(Attribute, attributes); attributed_type.* = .{ - .attributes = all_attrs, - .base = base, + .attributes = duped, + .base = base_ty, }; return attributed_type; } @@ -190,13 +187,10 @@ pub const Enum = struct { } }; -// might not need all 4 of these when finished, -// but currently it helps having all 4 when diff-ing -// the rust code. pub const TypeLayout = struct { /// The size of the type in bits. /// - /// This is the value returned by `sizeof` and C and `std::mem::size_of` in Rust + /// This is the value returned by `sizeof` in C /// (but in bits instead of bytes). This is a multiple of `pointer_alignment_bits`. size_bits: u64, /// The alignment of the type, in bits, when used as a field in a record. 
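Worked example (editor's note, not part of the change): a literal such as `1.5if16` carries the new IF16 suffix, so parseFloat in Parser.zig first types it as _Float16; the imaginary branch then switches the type to complex_float16, whose 32-bit size selects the cf16 case and interns

    .{ .complex = .{ .cf16 = .{ 0.0, 1.5 } } }

as the node's value.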
@@ -205,9 +199,7 @@ pub const TypeLayout = struct { /// cases in GCC where `_Alignof` returns a smaller value. field_alignment_bits: u32, /// The alignment, in bits, of valid pointers to this type. - /// - /// This is the value returned by `std::mem::align_of` in Rust - /// (but in bits instead of bytes). `size_bits` is a multiple of this value. + /// `size_bits` is a multiple of this value. pointer_alignment_bits: u32, /// The required alignment of the type in bits. /// @@ -301,6 +293,15 @@ pub const Record = struct { } return false; } + + pub fn hasField(self: *const Record, name: StringId) bool { + std.debug.assert(!self.isIncomplete()); + for (self.fields) |f| { + if (f.isAnonymousRecord() and f.ty.getRecord().?.hasField(name)) return true; + if (name == f.name) return true; + } + return false; + } }; pub const Specifier = enum { @@ -354,12 +355,11 @@ pub const Specifier = enum { float, double, long_double, - float80, float128, + complex_float16, complex_float, complex_double, complex_long_double, - complex_float80, complex_float128, // data.sub_type @@ -422,6 +422,8 @@ data: union { specifier: Specifier, qual: Qualifiers = .{}, decayed: bool = false, +/// typedef name, if any +name: StringId = .empty, pub const int = Type{ .specifier = .int }; pub const invalid = Type{ .specifier = .invalid }; @@ -435,8 +437,8 @@ pub fn is(ty: Type, specifier: Specifier) bool { pub fn withAttributes(self: Type, allocator: std.mem.Allocator, attributes: []const Attribute) !Type { if (attributes.len == 0) return self; - const attributed_type = try Type.Attributed.create(allocator, self, self.getAttributes(), attributes); - return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed }; + const attributed_type = try Type.Attributed.create(allocator, self, attributes); + return .{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed }; } pub fn isCallable(ty: Type) ?Type { @@ -470,6 +472,23 @@ pub fn isArray(ty: Type) bool { }; } +/// Must only be used to set the length of an incomplete array as determined by its initializer +pub fn setIncompleteArrayLen(ty: *Type, len: u64) void { + switch (ty.specifier) { + .incomplete_array => { + // Modifying .data is exceptionally allowed for .incomplete_array. 
+ ty.data.array.len = len; + ty.specifier = .array; + }, + + .typeof_type => ty.data.sub_type.setIncompleteArrayLen(len), + .typeof_expr => ty.data.expr.ty.setIncompleteArrayLen(len), + .attributed => ty.data.attributed.base.setIncompleteArrayLen(len), + + else => unreachable, + } +} + /// Whether the type is promoted if used as a variadic argument or as an argument to a function with no prototype fn undergoesDefaultArgPromotion(ty: Type, comp: *const Compilation) bool { return switch (ty.specifier) { @@ -536,7 +555,7 @@ pub fn isFloat(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off .float, .double, .long_double, .complex_float, .complex_double, .complex_long_double, - .fp16, .float16, .float80, .float128, .complex_float80, .complex_float128 => true, + .fp16, .float16, .float128, .complex_float128, .complex_float16 => true, // zig fmt: on .typeof_type => ty.data.sub_type.isFloat(), .typeof_expr => ty.data.expr.ty.isFloat(), @@ -548,11 +567,11 @@ pub fn isFloat(ty: Type) bool { pub fn isReal(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off - .complex_float, .complex_double, .complex_long_double, .complex_float80, + .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, - .complex_bit_int => false, + .complex_bit_int, .complex_float16 => false, // zig fmt: on .typeof_type => ty.data.sub_type.isReal(), .typeof_expr => ty.data.expr.ty.isReal(), @@ -564,11 +583,11 @@ pub fn isReal(ty: Type) bool { pub fn isComplex(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off - .complex_float, .complex_double, .complex_long_double, .complex_float80, + .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, - .complex_bit_int => true, + .complex_bit_int, .complex_float16 => true, // zig fmt: on .typeof_type => ty.data.sub_type.isComplex(), .typeof_expr => ty.data.expr.ty.isComplex(), @@ -671,11 +690,11 @@ pub fn elemType(ty: Type) Type { .attributed => ty.data.attributed.base.elemType(), .invalid => Type.invalid, // zig fmt: off - .complex_float, .complex_double, .complex_long_double, .complex_float80, + .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, - .complex_bit_int => ty.makeReal(), + .complex_bit_int, .complex_float16 => ty.makeReal(), // zig fmt: on else => unreachable, }; @@ -703,6 +722,16 @@ pub fn params(ty: Type) []Func.Param { }; } +/// Returns true if the return value or any param of `ty` is `.invalid` +/// Asserts that ty is a function type +pub fn isInvalidFunc(ty: Type) bool { + if (ty.returnType().is(.invalid)) return true; + for (ty.params()) |param| { + if (param.ty.is(.invalid)) return true; + } + return false; +} + pub fn arrayLen(ty: Type) ?u64 { return switch (ty.specifier) { .array, .static_array => ty.data.array.len, @@ -726,15 +755,6 @@ pub fn anyQual(ty: Type) bool { }; } -pub fn getAttributes(ty: Type) []const Attribute { - return switch 
(ty.specifier) { - .attributed => ty.data.attributed.attributes, - .typeof_type => ty.data.sub_type.getAttributes(), - .typeof_expr => ty.data.expr.ty.getAttributes(), - else => &.{}, - }; -} - pub fn getRecord(ty: Type) ?*const Type.Record { return switch (ty.specifier) { .attributed => ty.data.attributed.base.getRecord(), @@ -795,8 +815,8 @@ fn realIntegerConversion(a: Type, b: Type, comp: *const Compilation) Type { pub fn makeIntegerUnsigned(ty: Type) Type { // TODO discards attributed/typeof - var base = ty.canonicalize(.standard); - switch (base.specifier) { + var base_ty = ty.canonicalize(.standard); + switch (base_ty.specifier) { // zig fmt: off .uchar, .ushort, .uint, .ulong, .ulong_long, .uint128, .complex_uchar, .complex_ushort, .complex_uint, .complex_ulong, .complex_ulong_long, .complex_uint128, @@ -804,21 +824,21 @@ pub fn makeIntegerUnsigned(ty: Type) Type { // zig fmt: on .char, .complex_char => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 2); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 2); + return base_ty; }, // zig fmt: off .schar, .short, .int, .long, .long_long, .int128, .complex_schar, .complex_short, .complex_int, .complex_long, .complex_long_long, .complex_int128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 1); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 1); + return base_ty; }, // zig fmt: on .bit_int, .complex_bit_int => { - base.data.int.signedness = .unsigned; - return base; + base_ty.data.int.signedness = .unsigned; + return base_ty; }, else => unreachable, } @@ -837,6 +857,8 @@ pub fn integerPromotion(ty: Type, comp: *Compilation) Type { switch (specifier) { .@"enum" => { if (ty.hasIncompleteSize()) return .{ .specifier = .int }; + if (ty.data.@"enum".fixed) return ty.data.@"enum".tag_ty.integerPromotion(comp); + specifier = ty.data.@"enum".tag_ty.specifier; }, .bit_int, .complex_bit_int => return .{ .specifier = specifier, .data = ty.data }, @@ -915,53 +937,7 @@ pub fn hasUnboundVLA(ty: Type) bool { } pub fn hasField(ty: Type, name: StringId) bool { - switch (ty.specifier) { - .@"struct" => { - std.debug.assert(!ty.data.record.isIncomplete()); - for (ty.data.record.fields) |f| { - if (f.isAnonymousRecord() and f.ty.hasField(name)) return true; - if (name == f.name) return true; - } - }, - .@"union" => { - std.debug.assert(!ty.data.record.isIncomplete()); - for (ty.data.record.fields) |f| { - if (f.isAnonymousRecord() and f.ty.hasField(name)) return true; - if (name == f.name) return true; - } - }, - .typeof_type => return ty.data.sub_type.hasField(name), - .typeof_expr => return ty.data.expr.ty.hasField(name), - .attributed => return ty.data.attributed.base.hasField(name), - .invalid => return false, - else => unreachable, - } - return false; -} - -// TODO handle bitints -pub fn minInt(ty: Type, comp: *const Compilation) i64 { - std.debug.assert(ty.isInt()); - if (ty.isUnsignedInt(comp)) return 0; - return switch (ty.sizeof(comp).?) { - 1 => std.math.minInt(i8), - 2 => std.math.minInt(i16), - 4 => std.math.minInt(i32), - 8 => std.math.minInt(i64), - else => unreachable, - }; -} - -// TODO handle bitints -pub fn maxInt(ty: Type, comp: *const Compilation) u64 { - std.debug.assert(ty.isInt()); - return switch (ty.sizeof(comp).?) 
{ - 1 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u8)) else std.math.maxInt(i8), - 2 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u16)) else std.math.maxInt(i16), - 4 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u32)) else std.math.maxInt(i32), - 8 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u64)) else std.math.maxInt(i64), - else => unreachable, - }; + return ty.getRecord().?.hasField(name); } const TypeSizeOrder = enum { @@ -1004,16 +980,15 @@ pub fn sizeof(ty: Type, comp: *const Compilation) ?u64 { .fp16, .float16 => 2, .float => comp.target.cTypeByteSize(.float), .double => comp.target.cTypeByteSize(.double), - .float80 => 16, .float128 => 16, .bit_int => { - return std.mem.alignForward(u64, (ty.data.int.bits + 7) / 8, ty.alignof(comp)); + return std.mem.alignForward(u64, (@as(u32, ty.data.int.bits) + 7) / 8, ty.alignof(comp)); }, // zig fmt: off .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_float, .complex_double, - .complex_long_double, .complex_float80, .complex_float128, .complex_bit_int, + .complex_long_double, .complex_float128, .complex_bit_int, .complex_float16, => return 2 * ty.makeReal().sizeof(comp).?, // zig fmt: on .pointer => unreachable, @@ -1050,7 +1025,6 @@ pub fn bitSizeof(ty: Type, comp: *const Compilation) ?u64 { .attributed => ty.data.attributed.base.bitSizeof(comp), .bit_int => return ty.data.int.bits, .long_double => comp.target.cTypeBitSize(.longdouble), - .float80 => return 80, else => 8 * (ty.sizeof(comp) orelse return null), }; } @@ -1100,7 +1074,7 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 { .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_float, .complex_double, - .complex_long_double, .complex_float80, .complex_float128, .complex_bit_int, + .complex_long_double, .complex_float128, .complex_bit_int, .complex_float16, => return ty.makeReal().alignof(comp), // zig fmt: on @@ -1114,10 +1088,15 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 { .long_long => comp.target.cTypeAlignment(.longlong), .ulong_long => comp.target.cTypeAlignment(.ulonglong), - .bit_int => @min( - std.math.ceilPowerOfTwoPromote(u16, (ty.data.int.bits + 7) / 8), - 16, // comp.target.maxIntAlignment(), please use your own logic for this value as it is implementation-defined - ), + .bit_int => { + // https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2709.pdf + // _BitInt(N) types align with existing calling conventions. They have the same size and alignment as the + // smallest basic type that can contain them. Types that are larger than __int64_t are conceptually treated + // as struct of register size chunks. The number of chunks is the smallest number that can contain the type. 
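+ // Illustration (assuming a typical 64-bit target): _BitInt(12) takes the alignment of the smallest basic type that contains it (a 16-bit integer, so 2 bytes), while _BitInt(100) is wider than __int64_t and is laid out as register-size chunks (8 bytes).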
+ if (ty.data.int.bits > 64) return 8; + const basic_type = comp.intLeastN(ty.data.int.bits, ty.data.int.signedness); + return basic_type.alignof(comp); + }, .float => comp.target.cTypeAlignment(.float), .double => comp.target.cTypeAlignment(.double), @@ -1126,7 +1105,7 @@ .int128, .uint128 => if (comp.target.cpu.arch == .s390x and comp.target.os.tag == .linux and comp.target.isGnu()) 8 else 16, .fp16, .float16 => 2, - .float80, .float128 => 16, + .float128 => 16, .pointer, .static_array, .nullptr_t, @@ -1142,7 +1121,11 @@ }; } -pub const QualHandling = enum { standard, preserve_quals }; +// This enum must be kept public because it is used by the downstream Zig translate-c. +pub const QualHandling = enum { + standard, + preserve_quals, +}; /// Canonicalize a possibly-typeof() type. If the type is not a typeof() type, simply /// return it. Otherwise, determine the actual qualified type. @@ -1151,17 +1134,12 @@ pub const QualHandling = enum { standard, preserve_quals }; /// arrays and pointers. pub fn canonicalize(ty: Type, qual_handling: QualHandling) Type { var cur = ty; - if (cur.specifier == .attributed) { - cur = cur.data.attributed.base; - cur.decayed = ty.decayed; - } - if (!cur.isTypeof()) return cur; - var qual = cur.qual; while (true) { switch (cur.specifier) { .typeof_type => cur = cur.data.sub_type.*, .typeof_expr => cur = cur.data.expr.ty, + .attributed => cur = cur.data.attributed.base, else => break, } qual = qual.mergeAll(cur.qual); } @@ -1189,7 +1167,7 @@ pub fn requestedAlignment(ty: Type, comp: *const Compilation) ?u29 { return switch (ty.specifier) { .typeof_type => ty.data.sub_type.requestedAlignment(comp), .typeof_expr => ty.data.expr.ty.requestedAlignment(comp), - .attributed => annotationAlignment(comp, ty.data.attributed.attributes), + .attributed => annotationAlignment(comp, Attribute.Iterator.initType(ty)), else => null, }; } @@ -1199,12 +1177,27 @@ pub fn enumIsPacked(ty: Type, comp: *const Compilation) bool { return comp.langopts.short_enums or target_util.packAllEnums(comp.target) or ty.hasAttribute(.@"packed"); } -pub fn annotationAlignment(comp: *const Compilation, attrs: ?[]const Attribute) ?u29 { - const a = attrs orelse return null; +pub fn getName(ty: Type) StringId { + return switch (ty.specifier) { + .typeof_type => if (ty.name == .empty) ty.data.sub_type.getName() else ty.name, + .typeof_expr => if (ty.name == .empty) ty.data.expr.ty.getName() else ty.name, + .attributed => if (ty.name == .empty) ty.data.attributed.base.getName() else ty.name, + else => ty.name, + }; +} +pub fn annotationAlignment(comp: *const Compilation, attrs: Attribute.Iterator) ?u29 { + var it = attrs; var max_requested: ?u29 = null; - for (a) |attribute| { + var last_aligned_index: ?usize = null; + while (it.next()) |item| { + const attribute, const index = item; if (attribute.tag != .aligned) continue; + if (last_aligned_index) |aligned_index| { + // once an `aligned` attribute has been found, we are done as soon as we recurse into a nested type + if (index <= aligned_index) break; + } + last_aligned_index = index; const requested = if (attribute.args.aligned.alignment) |alignment| alignment.requested else target_util.defaultAlignment(comp.target); if (max_requested == null or max_requested.? 
< requested) { max_requested = requested; @@ -1225,6 +1218,10 @@ pub fn eql(a_param: Type, b_param: Type, comp: *const Compilation, check_qualifi if (!b.isFunc()) return false; } else if (a.isArray()) { if (!b.isArray()) return false; + } else if (a.specifier == .@"enum" and b.specifier != .@"enum") { + return a.data.@"enum".tag_ty.eql(b, comp, check_qualifiers); + } else if (b.specifier == .@"enum" and a.specifier != .@"enum") { + return a.eql(b.data.@"enum".tag_ty, comp, check_qualifiers); } else if (a.specifier != b.specifier) return false; if (a.qual.atomic != b.qual.atomic) return false; @@ -1315,6 +1312,12 @@ pub fn integerRank(ty: Type, comp: *const Compilation) usize { .long_long, .ulong_long => 6 + (ty.bitSizeof(comp).? << 3), .int128, .uint128 => 7 + (ty.bitSizeof(comp).? << 3), + .typeof_type => ty.data.sub_type.integerRank(comp), + .typeof_expr => ty.data.expr.ty.integerRank(comp), + .attributed => ty.data.attributed.base.integerRank(comp), + + .@"enum" => real.data.@"enum".tag_ty.integerRank(comp), + else => unreachable, }); } @@ -1322,25 +1325,26 @@ pub fn integerRank(ty: Type, comp: *const Compilation) usize { /// Returns true if `a` and `b` are integer types that differ only in sign pub fn sameRankDifferentSign(a: Type, b: Type, comp: *const Compilation) bool { if (!a.isInt() or !b.isInt()) return false; + if (a.hasIncompleteSize() or b.hasIncompleteSize()) return false; if (a.integerRank(comp) != b.integerRank(comp)) return false; return a.isUnsignedInt(comp) != b.isUnsignedInt(comp); } pub fn makeReal(ty: Type) Type { // TODO discards attributed/typeof - var base = ty.canonicalize(.standard); - switch (base.specifier) { - .complex_float, .complex_double, .complex_long_double, .complex_float80, .complex_float128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 5); - return base; + var base_ty = ty.canonicalize(.standard); + switch (base_ty.specifier) { + .complex_float16, .complex_float, .complex_double, .complex_long_double, .complex_float128 => { + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) - 5); + return base_ty; }, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 13); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) - 13); + return base_ty; }, .complex_bit_int => { - base.specifier = .bit_int; - return base; + base_ty.specifier = .bit_int; + return base_ty; }, else => return ty, } @@ -1348,19 +1352,19 @@ pub fn makeReal(ty: Type) Type { pub fn makeComplex(ty: Type) Type { // TODO discards attributed/typeof - var base = ty.canonicalize(.standard); - switch (base.specifier) { - .float, .double, .long_double, .float80, .float128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 5); - return base; + var base_ty = ty.canonicalize(.standard); + switch (base_ty.specifier) { + .float, .double, .long_double, .float128 => { + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 5); + return base_ty; }, .char, .schar, .uchar, .short, .ushort, .int, .uint, .long, .ulong, .long_long, .ulong_long, .int128, .uint128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 13); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 13); + return base_ty; }, .bit_int => { - base.specifier = .complex_bit_int; - 
return base; + base_ty.specifier = .complex_bit_int; + return base_ty; }, else => return ty, } @@ -1541,13 +1545,12 @@ pub const Builder = struct { float, double, long_double, - float80, float128, complex, + complex_float16, complex_float, complex_double, complex_long_double, - complex_float80, complex_float128, pointer: *Type, @@ -1613,9 +1616,6 @@ pub const Builder = struct { .int128 => "__int128", .sint128 => "signed __int128", .uint128 => "unsigned __int128", - .bit_int => "_BitInt", - .sbit_int => "signed _BitInt", - .ubit_int => "unsigned _BitInt", .complex_char => "_Complex char", .complex_schar => "_Complex signed char", .complex_uchar => "_Complex unsigned char", @@ -1645,22 +1645,18 @@ pub const Builder = struct { .complex_int128 => "_Complex __int128", .complex_sint128 => "_Complex signed __int128", .complex_uint128 => "_Complex unsigned __int128", - .complex_bit_int => "_Complex _BitInt", - .complex_sbit_int => "_Complex signed _BitInt", - .complex_ubit_int => "_Complex unsigned _BitInt", .fp16 => "__fp16", .float16 => "_Float16", .float => "float", .double => "double", .long_double => "long double", - .float80 => "__float80", .float128 => "__float128", .complex => "_Complex", + .complex_float16 => "_Complex _Float16", .complex_float => "_Complex float", .complex_double => "_Complex double", .complex_long_double => "_Complex long double", - .complex_float80 => "_Complex __float80", .complex_float128 => "_Complex __float128", .attributed => |attributed| Builder.fromType(attributed.base).str(langopts), @@ -1757,19 +1753,20 @@ pub const Builder = struct { .complex_uint128 => ty.specifier = .complex_uint128, .bit_int, .sbit_int, .ubit_int, .complex_bit_int, .complex_ubit_int, .complex_sbit_int => |bits| { const unsigned = b.specifier == .ubit_int or b.specifier == .complex_ubit_int; + const complex_str = if (b.complex_tok != null) "_Complex " else ""; if (unsigned) { if (bits < 1) { - try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?); + try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, complex_str); return Type.invalid; } } else { if (bits < 2) { - try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?); + try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, complex_str); return Type.invalid; } } if (bits > Compilation.bit_int_max_bits) { - try p.errStr(.bit_int_too_big, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?); + try p.errStr(if (unsigned) .unsigned_bit_int_too_big else .signed_bit_int_too_big, b.bit_int_tok.?, complex_str); return Type.invalid; } ty.specifier = if (b.complex_tok != null) .complex_bit_int else .bit_int; @@ -1784,12 +1781,11 @@ pub const Builder = struct { .float => ty.specifier = .float, .double => ty.specifier = .double, .long_double => ty.specifier = .long_double, - .float80 => ty.specifier = .float80, .float128 => ty.specifier = .float128, + .complex_float16 => ty.specifier = .complex_float16, .complex_float => ty.specifier = .complex_float, .complex_double => ty.specifier = .complex_double, .complex_long_double => ty.specifier = .complex_long_double, - .complex_float80 => ty.specifier = .complex_float80, .complex_float128 => ty.specifier = .complex_float128, .complex => { try p.errTok(.plain_complex, p.tok_i - 1); @@ -1907,6 +1903,7 @@ pub const Builder = struct { /// Try to combine type from typedef, returns true if successful. 
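/// An invalid typedef type is rejected up front and never combines (see the added check below).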
pub fn combineTypedef(b: *Builder, p: *Parser, typedef_ty: Type, name_tok: TokenIndex) bool { + if (typedef_ty.is(.invalid)) return false; b.error_on_invalid = true; defer b.error_on_invalid = false; @@ -2094,6 +2091,7 @@ pub const Builder = struct { }, .long => b.specifier = switch (b.specifier) { .none => .long, + .double => .long_double, .long => .long_long, .unsigned => .ulong, .signed => .long, @@ -2106,6 +2104,7 @@ pub const Builder = struct { .complex_long => .complex_long_long, .complex_slong => .complex_slong_long, .complex_ulong => .complex_ulong_long, + .complex_double => .complex_long_double, else => return b.cannotCombine(p, source_tok), }, .int128 => b.specifier = switch (b.specifier) { @@ -2140,6 +2139,7 @@ pub const Builder = struct { }, .float16 => b.specifier = switch (b.specifier) { .none => .float16, + .complex => .complex_float16, else => return b.cannotCombine(p, source_tok), }, .float => b.specifier = switch (b.specifier) { @@ -2154,11 +2154,6 @@ pub const Builder = struct { .complex => .complex_double, else => return b.cannotCombine(p, source_tok), }, - .float80 => b.specifier = switch (b.specifier) { - .none => .float80, - .complex => .complex_float80, - else => return b.cannotCombine(p, source_tok), - }, .float128 => b.specifier = switch (b.specifier) { .none => .float128, .complex => .complex_float128, @@ -2166,10 +2161,10 @@ pub const Builder = struct { }, .complex => b.specifier = switch (b.specifier) { .none => .complex, + .float16 => .complex_float16, .float => .complex_float, .double => .complex_double, .long_double => .complex_long_double, - .float80 => .complex_float80, .float128 => .complex_float128, .char => .complex_char, .schar => .complex_schar, @@ -2207,7 +2202,6 @@ pub const Builder = struct { .complex_float, .complex_double, .complex_long_double, - .complex_float80, .complex_float128, .complex_char, .complex_schar, @@ -2294,13 +2288,12 @@ pub const Builder = struct { .float16 => .float16, .float => .float, .double => .double, - .float80 => .float80, .float128 => .float128, .long_double => .long_double, + .complex_float16 => .complex_float16, .complex_float => .complex_float, .complex_double => .complex_double, .complex_long_double => .complex_long_double, - .complex_float80 => .complex_float80, .complex_float128 => .complex_float128, .pointer => .{ .pointer = ty.data.sub_type }, @@ -2350,22 +2343,30 @@ pub const Builder = struct { } }; +/// Use with caution +pub fn base(ty: *Type) *Type { + return switch (ty.specifier) { + .typeof_type => ty.data.sub_type.base(), + .typeof_expr => ty.data.expr.ty.base(), + .attributed => ty.data.attributed.base.base(), + else => ty, + }; +} + pub fn getAttribute(ty: Type, comptime tag: Attribute.Tag) ?Attribute.ArgumentsForTag(tag) { - switch (ty.specifier) { - .typeof_type => return ty.data.sub_type.getAttribute(tag), - .typeof_expr => return ty.data.expr.ty.getAttribute(tag), - .attributed => { - for (ty.data.attributed.attributes) |attribute| { - if (attribute.tag == tag) return @field(attribute.args, @tagName(tag)); - } - return null; - }, - else => return null, + if (tag == .aligned) @compileError("use requestedAlignment"); + var it = Attribute.Iterator.initType(ty); + while (it.next()) |item| { + const attribute, _ = item; + if (attribute.tag == tag) return @field(attribute.args, @tagName(tag)); } + return null; } pub fn hasAttribute(ty: Type, tag: Attribute.Tag) bool { - for (ty.getAttributes()) |attr| { + var it = Attribute.Iterator.initType(ty); + while (it.next()) |item| { + const attr, _ = item; if 
(attr.tag == tag) return true; } return false; @@ -2489,6 +2490,8 @@ fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts _ = try elem_ty.printPrologue(mapper, langopts, w); try w.writeAll("' values)"); }, + .bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), + .complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), else => try w.writeAll(Builder.fromType(ty).str(langopts).?), } return true; @@ -2644,15 +2647,12 @@ pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: .attributed => { if (ty.isDecayed()) try w.writeAll("*d:"); try w.writeAll("attributed("); - try ty.data.attributed.base.dump(mapper, langopts, w); + try ty.data.attributed.base.canonicalize(.standard).dump(mapper, langopts, w); try w.writeAll(")"); }, - else => { - try w.writeAll(Builder.fromType(ty).str(langopts).?); - if (ty.specifier == .bit_int or ty.specifier == .complex_bit_int) { - try w.print("({d})", .{ty.data.int.bits}); - } - }, + .bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), + .complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), + else => try w.writeAll(Builder.fromType(ty).str(langopts).?), } } diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig index 2dd9a86abe..892a09b1d6 100644 --- a/lib/compiler/aro/aro/Value.zig +++ b/lib/compiler/aro/aro/Value.zig @@ -8,6 +8,7 @@ const BigIntSpace = Interner.Tag.Int.BigIntSpace; const Compilation = @import("Compilation.zig"); const Type = @import("Type.zig"); const target_util = @import("target.zig"); +const annex_g = @import("annex_g.zig"); const Value = @This(); @@ -41,6 +42,14 @@ pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) b return comp.interner.get(v.ref()) == tag; } +pub fn isArithmetic(v: Value, comp: *const Compilation) bool { + if (v.opt_ref == .none) return false; + return switch (comp.interner.get(v.ref())) { + .int, .float, .complex => true, + else => false, + }; +} + /// Number of bits needed to hold `v`. 
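/// For example, 255 requires 8 bits while 256 requires 9.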
/// Asserts that `v` is not negative pub fn minUnsignedBits(v: Value, comp: *const Compilation) usize { @@ -58,7 +67,7 @@ test "minUnsignedBits" { } }; - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = try std.zig.system.resolveTargetQuery(target_query); @@ -93,7 +102,7 @@ test "minSignedBits" { } }; - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = try std.zig.system.resolveTargetQuery(target_query); @@ -134,7 +143,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang v.* = fromBool(!was_zero); if (was_zero or was_one) return .none; return .value_changed; - } else if (dest_ty.isUnsignedInt(comp) and v.compare(.lt, zero, comp)) { + } else if (dest_ty.isUnsignedInt(comp) and float_val < 0) { v.* = zero; return .out_of_range; } @@ -154,7 +163,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang }; // The float is reduced in rational.setFloat, so we assert that denominator is equal to one - const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true }; + const big_one = BigIntConst{ .limbs = &.{1}, .positive = true }; assert(rational.q.toConst().eqlAbs(big_one)); if (is_negative) { @@ -179,6 +188,20 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang /// `.none` value remains unchanged. pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void { if (v.opt_ref == .none) return; + + if (dest_ty.isComplex()) { + const bits = dest_ty.bitSizeof(comp).?; + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } }, + 64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } }, + 128 => .{ .cf64 = .{ v.toFloat(f64, comp), 0 } }, + 160 => .{ .cf80 = .{ v.toFloat(f80, comp), 0 } }, + 256 => .{ .cf128 = .{ v.toFloat(f128, comp), 0 } }, + else => unreachable, + }; + v.* = try intern(comp, .{ .complex = cf }); + return; + } const bits = dest_ty.bitSizeof(comp).?; return switch (comp.interner.get(v.ref()).int) { inline .u64, .i64 => |data| { @@ -207,40 +230,89 @@ }; } +pub const IntCastChangeKind = enum { + /// Value did not change + none, + /// Truncation occurred (e.g., i32 to i16) + truncated, + /// Sign conversion occurred (e.g., i32 to u32) + sign_changed, +}; + /// Truncates or extends bits based on type. /// `.none` value remains unchanged. -pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !void { - if (v.opt_ref == .none) return; - const bits: usize = @intCast(dest_ty.bitSizeof(comp).?); +pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind { + if (v.opt_ref == .none) return .none; + + const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?); + const dest_signed = dest_ty.signedness(comp) == .signed; + var space: BigIntSpace = undefined; const big = v.toBigInt(&space, comp); + const value_bits = big.bitCountTwosComp(); + + // if big is negative, the source value is signed. 
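+ // e.g. (illustrative) casting -1 from i32 to u32 reports .sign_changed (no bits are lost), while casting 300 from i32 to u8 reports .truncated.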
+ const src_signed = !big.positive; + const sign_change = src_signed != dest_signed; const limbs = try comp.gpa.alloc( std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(@max(big.bitCountTwosComp(), bits)), + std.math.big.int.calcTwosCompLimbCount(@max(value_bits, dest_bits)), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.truncate(big, dest_ty.signedness(comp), bits); + + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.truncate(big, dest_ty.signedness(comp), dest_bits); v.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); + + const truncation_occurred = value_bits > dest_bits; + if (truncation_occurred) { + return .truncated; + } else if (sign_change) { + return .sign_changed; + } else { + return .none; + } } /// Converts the stored value to a float of the specified type /// `.none` value remains unchanged. pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void { if (v.opt_ref == .none) return; - // TODO complex values - const bits = dest_ty.makeReal().bitSizeof(comp).?; - const f: Interner.Key.Float = switch (bits) { - 16 => .{ .f16 = v.toFloat(f16, comp) }, - 32 => .{ .f32 = v.toFloat(f32, comp) }, - 64 => .{ .f64 = v.toFloat(f64, comp) }, - 80 => .{ .f80 = v.toFloat(f80, comp) }, - 128 => .{ .f128 = v.toFloat(f128, comp) }, + const bits = dest_ty.bitSizeof(comp).?; + if (dest_ty.isComplex()) { + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } }, + 64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } }, + 128 => .{ .cf64 = .{ v.toFloat(f64, comp), v.imag(f64, comp) } }, + 160 => .{ .cf80 = .{ v.toFloat(f80, comp), v.imag(f80, comp) } }, + 256 => .{ .cf128 = .{ v.toFloat(f128, comp), v.imag(f128, comp) } }, + else => unreachable, + }; + v.* = try intern(comp, .{ .complex = cf }); + } else { + const f: Interner.Key.Float = switch (bits) { + 16 => .{ .f16 = v.toFloat(f16, comp) }, + 32 => .{ .f32 = v.toFloat(f32, comp) }, + 64 => .{ .f64 = v.toFloat(f64, comp) }, + 80 => .{ .f80 = v.toFloat(f80, comp) }, + 128 => .{ .f128 = v.toFloat(f128, comp) }, + else => unreachable, + }; + v.* = try intern(comp, .{ .float = f }); + } +} + +pub fn imag(v: Value, comptime T: type, comp: *const Compilation) T { + return switch (comp.interner.get(v.ref())) { + .int => 0.0, + .float => 0.0, + .complex => |repr| switch (repr) { + inline else => |components| return @floatCast(components[1]), + }, else => unreachable, }; - v.* = try intern(comp, .{ .float = f }); } pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T { @@ -252,6 +324,39 @@ pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T { .float => |repr| switch (repr) { inline else => |data| @floatCast(data), }, + .complex => |repr| switch (repr) { + inline else => |components| @floatCast(components[0]), + }, + else => unreachable, + }; +} + +pub fn realPart(v: Value, comp: *Compilation) !Value { + if (v.opt_ref == .none) return v; + return switch (comp.interner.get(v.ref())) { + .int, .float => v, + .complex => |repr| Value.intern(comp, switch (repr) { + .cf16 => |components| .{ .float = .{ .f16 = components[0] } }, + .cf32 => |components| .{ .float = .{ .f32 = components[0] } }, + .cf64 => |components| .{ .float = .{ .f64 = components[0] } }, + .cf80 => |components| .{ .float = .{ .f80 = components[0] } }, + .cf128 => |components| 
.{ .float = .{ .f128 = components[0] } }, + }), + else => unreachable, + }; +} + +pub fn imaginaryPart(v: Value, comp: *Compilation) !Value { + if (v.opt_ref == .none) return v; + return switch (comp.interner.get(v.ref())) { + .int, .float => Value.zero, + .complex => |repr| Value.intern(comp, switch (repr) { + .cf16 => |components| .{ .float = .{ .f16 = components[1] } }, + .cf32 => |components| .{ .float = .{ .f32 = components[1] } }, + .cf64 => |components| .{ .float = .{ .f64 = components[1] } }, + .cf80 => |components| .{ .float = .{ .f80 = components[1] } }, + .cf128 => |components| .{ .float = .{ .f128 = components[1] } }, + }), else => unreachable, }; } @@ -298,11 +403,56 @@ pub fn isZero(v: Value, comp: *const Compilation) bool { inline .i64, .u64 => |data| return data == 0, .big_int => |data| return data.eqlZero(), }, + .complex => |repr| switch (repr) { + inline else => |data| return data[0] == 0.0 and data[1] == 0.0, + }, .bytes => return false, else => unreachable, } } +const IsInfKind = enum(i32) { + negative = -1, + finite = 0, + positive = 1, + unknown = std.math.maxInt(i32), +}; + +pub fn isInfSign(v: Value, comp: *const Compilation) IsInfKind { + if (v.opt_ref == .none) return .unknown; + return switch (comp.interner.get(v.ref())) { + .float => |repr| switch (repr) { + inline else => |data| if (std.math.isPositiveInf(data)) .positive else if (std.math.isNegativeInf(data)) .negative else .finite, + }, + else => .unknown, + }; +} +pub fn isInf(v: Value, comp: *const Compilation) bool { + if (v.opt_ref == .none) return false; + return switch (comp.interner.get(v.ref())) { + .float => |repr| switch (repr) { + inline else => |data| std.math.isInf(data), + }, + .complex => |repr| switch (repr) { + inline else => |components| std.math.isInf(components[0]) or std.math.isInf(components[1]), + }, + else => false, + }; +} + +pub fn isNan(v: Value, comp: *const Compilation) bool { + if (v.opt_ref == .none) return false; + return switch (comp.interner.get(v.ref())) { + .float => |repr| switch (repr) { + inline else => |data| std.math.isNan(data), + }, + .complex => |repr| switch (repr) { + inline else => |components| std.math.isNan(components[0]) or std.math.isNan(components[1]), + }, + else => false, + }; +} + /// Converts value to zero or one; /// `.none` value remains unchanged. 
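/// For example, 42 and -1.5 both become one; zero stays zero.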
pub fn boolCast(v: *Value, comp: *const Compilation) void { @@ -326,9 +476,45 @@ pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T { return big_int.to(T) catch null; } +const ComplexOp = enum { + add, + sub, +}; + +fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp: *Compilation) !Value { + const res_re = switch (op) { + .add => lhs.toFloat(T, comp) + rhs.toFloat(T, comp), + .sub => lhs.toFloat(T, comp) - rhs.toFloat(T, comp), + }; + const res_im = switch (op) { + .add => lhs.imag(T, comp) + rhs.imag(T, comp), + .sub => lhs.imag(T, comp) - rhs.imag(T, comp), + }; + + return switch (T) { + f16 => intern(comp, .{ .complex = .{ .cf16 = .{ res_re, res_im } } }), + f32 => intern(comp, .{ .complex = .{ .cf32 = .{ res_re, res_im } } }), + f64 => intern(comp, .{ .complex = .{ .cf64 = .{ res_re, res_im } } }), + f80 => intern(comp, .{ .complex = .{ .cf80 = .{ res_re, res_im } } }), + f128 => intern(comp, .{ .complex = .{ .cf128 = .{ res_re, res_im } } }), + else => unreachable, + }; +} + pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + res.* = switch (bits) { + 32 => try complexAddSub(lhs, rhs, f16, .add, comp), + 64 => try complexAddSub(lhs, rhs, f32, .add, comp), + 128 => try complexAddSub(lhs, rhs, f64, .add, comp), + 160 => try complexAddSub(lhs, rhs, f80, .add, comp), + 256 => try complexAddSub(lhs, rhs, f128, .add, comp), + else => unreachable, + }; + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) + rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) + rhs.toFloat(f32, comp) }, @@ -350,7 +536,7 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits); res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -361,6 +547,17 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + res.* = switch (bits) { + 32 => try complexAddSub(lhs, rhs, f16, .sub, comp), + 64 => try complexAddSub(lhs, rhs, f32, .sub, comp), + 128 => try complexAddSub(lhs, rhs, f64, .sub, comp), + 160 => try complexAddSub(lhs, rhs, f80, .sub, comp), + 256 => try complexAddSub(lhs, rhs, f128, .sub, comp), + else => unreachable, + }; + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) - rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) - rhs.toFloat(f32, comp) }, @@ -382,7 +579,7 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, 
rhs_bigint, ty.signedness(comp), bits); res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -393,6 +590,18 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, + 64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, + 128 => .{ .cf64 = annex_g.complexFloatMul(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) }, + 160 => .{ .cf80 = annex_g.complexFloatMul(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) }, + 256 => .{ .cf128 = annex_g.complexFloatMul(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) }, + else => unreachable, + }; + res.* = try intern(comp, .{ .complex = cf }); + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) * rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) * rhs.toFloat(f32, comp) }, @@ -438,6 +647,18 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, + 64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, + 128 => .{ .cf64 = annex_g.complexFloatDiv(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) }, + 160 => .{ .cf80 = annex_g.complexFloatDiv(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) }, + 256 => .{ .cf128 = annex_g.complexFloatDiv(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) }, + else => unreachable, + }; + res.* = try intern(comp, .{ .complex = cf }); + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) / rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) / rhs.toFloat(f32, comp) }, @@ -491,11 +712,11 @@ pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { const signedness = ty.signedness(comp); if (signedness == .signed) { - var spaces: [3]BigIntSpace = undefined; - const min_val = BigIntMutable.init(&spaces[0].limbs, ty.minInt(comp)).toConst(); - const negative = BigIntMutable.init(&spaces[1].limbs, -1).toConst(); - const big_one = BigIntMutable.init(&spaces[2].limbs, 1).toConst(); - if (lhs_bigint.eql(min_val) and rhs_bigint.eql(negative)) { + var spaces: [2]BigIntSpace = undefined; + const min_val = try Value.minInt(ty, comp); + const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst(); + const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst(); + if (lhs.compare(.eq, min_val, comp) and 
rhs_bigint.eql(negative)) { return .{}; } else if (rhs_bigint.order(big_one).compare(.lt)) { // lhs - @divTrunc(lhs, rhs) * rhs @@ -542,7 +763,7 @@ pub fn bitOr(lhs: Value, rhs: Value, comp: *Compilation) !Value { @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -554,12 +775,13 @@ pub fn bitXor(lhs: Value, rhs: Value, comp: *Compilation) !Value { const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); + const extra = @intFromBool(lhs_bigint.positive != rhs_bigint.positive); const limbs = try comp.gpa.alloc( std.math.big.Limb, - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + extra, ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -571,12 +793,18 @@ pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value { const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); - const limbs = try comp.gpa.alloc( - std.math.big.Limb, - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), - ); + const limb_count = if (lhs_bigint.positive and rhs_bigint.positive) + @min(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + else if (lhs_bigint.positive) + lhs_bigint.limbs.len + else if (rhs_bigint.positive) + rhs_bigint.limbs.len + else + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1; + + const limbs = try comp.gpa.alloc(std.math.big.Limb, limb_count); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -592,7 +820,7 @@ pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value { std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -606,9 +834,9 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b const bits: usize = @intCast(ty.bitSizeof(comp).?); if (shift > bits) { if (lhs_bigint.positive) { - res.* = try intern(comp, .{ .int = .{ .u64 = ty.maxInt(comp) } }); + res.* = try Value.maxInt(ty, comp); } else { - res.* = try intern(comp, .{ .int = .{ .i64 = ty.minInt(comp) } }); + res.* = try Value.minInt(ty, comp); } return true; } @@ -618,7 +846,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, ); defer 
comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.shiftLeft(lhs_bigint, shift); const signedness = ty.signedness(comp); @@ -652,12 +880,25 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.shiftRight(lhs_bigint, shift); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } +pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value { + const bits = ty.bitSizeof(comp).?; + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } }, + 64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } }, + 128 => .{ .cf64 = .{ val.toFloat(f64, comp), -val.imag(f64, comp) } }, + 160 => .{ .cf80 = .{ val.toFloat(f80, comp), -val.imag(f80, comp) } }, + 256 => .{ .cf128 = .{ val.toFloat(f128, comp), -val.imag(f128, comp) } }, + else => unreachable, + }; + return intern(comp, .{ .complex = cf }); +} + pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool { if (op == .eq) { return lhs.opt_ref == rhs.opt_ref; @@ -672,6 +913,12 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons const rhs_f128 = rhs.toFloat(f128, comp); return std.math.compare(lhs_f128, op, rhs_f128); } + if (lhs_key == .complex or rhs_key == .complex) { + assert(op == .neq); + const real_equal = std.math.compare(lhs.toFloat(f128, comp), .eq, rhs.toFloat(f128, comp)); + const imag_equal = std.math.compare(lhs.imag(f128, comp), .eq, rhs.imag(f128, comp)); + return !real_equal or !imag_equal; + } var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; @@ -680,6 +927,42 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons return lhs_bigint.order(rhs_bigint).compare(op); } +fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value { + const signedness = ty.signedness(comp); + if (limit == .min and signedness == .unsigned) return Value.zero; + const mag_bits: usize = @intCast(ty.bitSizeof(comp).?); + switch (mag_bits) { + inline 8, 16, 32, 64 => |bits| { + if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp); + return switch (signedness) { + inline else => |sign| Value.int(std.math.maxInt(std.meta.Int(sign, bits)), comp), + }; + }, + else => {}, + } + + const sign_bits = @intFromBool(signedness == .signed); + const total_bits = mag_bits + sign_bits; + + const limbs = try comp.gpa.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(total_bits), + ); + defer comp.gpa.free(limbs); + + var result_bigint: BigIntMutable = .{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.setTwosCompIntLimit(limit, signedness, mag_bits); + return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); +} + +pub fn minInt(ty: Type, comp: *Compilation) !Value { + return twosCompIntLimit(.min, ty, comp); +} + +pub fn maxInt(ty: Type, comp: *Compilation) !Value { + return 
twosCompIntLimit(.max, ty, comp); +} + pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void { if (ty.is(.bool)) { return w.writeAll(if (v.isZero(comp)) "false" else "true"); @@ -696,6 +979,10 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}), }, .bytes => |b| return printString(b, ty, comp, w), + .complex => |repr| switch (repr) { + .cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }), + inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }), + }, else => unreachable, // not a value } } @@ -703,26 +990,44 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void { const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?); const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; + try w.writeByte('"'); switch (size) { - inline .@"1", .@"2" => |sz| { - const data_slice: []const sz.Type() = @alignCast(std.mem.bytesAsSlice(sz.Type(), without_null)); - const formatter = if (sz == .@"1") std.zig.fmtEscapes(data_slice) else std.unicode.fmtUtf16Le(data_slice); - try w.print("\"{}\"", .{formatter}); + .@"1" => try w.print("{}", .{std.zig.fmtEscapes(without_null)}), + .@"2" => { + var items: [2]u16 = undefined; + var i: usize = 0; + while (i < without_null.len) { + @memcpy(std.mem.sliceAsBytes(items[0..1]), without_null[i..][0..2]); + i += 2; + const is_surrogate = std.unicode.utf16IsHighSurrogate(items[0]); + if (is_surrogate and i < without_null.len) { + @memcpy(std.mem.sliceAsBytes(items[1..2]), without_null[i..][0..2]); + if (std.unicode.utf16DecodeSurrogatePair(&items)) |decoded| { + i += 2; + try w.print("{u}", .{decoded}); + } else |_| { + try w.print("\\x{x}", .{items[0]}); + } + } else if (is_surrogate) { + try w.print("\\x{x}", .{items[0]}); + } else { + try w.print("{u}", .{items[0]}); + } + } }, .@"4" => { - try w.writeByte('"'); - const data_slice = std.mem.bytesAsSlice(u32, without_null); - var buf: [4]u8 = undefined; - for (data_slice) |item| { - if (item <= std.math.maxInt(u21) and std.unicode.utf8ValidCodepoint(@intCast(item))) { - const codepoint: u21 = @intCast(item); - const written = std.unicode.utf8Encode(codepoint, &buf) catch unreachable; - try w.print("{s}", .{buf[0..written]}); + var item: [1]u32 = undefined; + const data_slice = std.mem.sliceAsBytes(item[0..1]); + for (0..@divExact(without_null.len, 4)) |n| { + @memcpy(data_slice, without_null[n * 4 ..][0..4]); + if (item[0] <= std.math.maxInt(u21) and std.unicode.utf8ValidCodepoint(@intCast(item[0]))) { + const codepoint: u21 = @intCast(item[0]); + try w.print("{u}", .{codepoint}); } else { - try w.print("\\x{x}", .{item}); + try w.print("\\x{x}", .{item[0]}); } } - try w.writeByte('"'); }, } + try w.writeByte('"'); } diff --git a/lib/compiler/aro/aro/annex_g.zig b/lib/compiler/aro/aro/annex_g.zig new file mode 100644 index 0000000000..56765ee353 --- /dev/null +++ b/lib/compiler/aro/aro/annex_g.zig @@ -0,0 +1,118 @@ +//! 
Complex arithmetic algorithms from C99 Annex G + +const std = @import("std"); +const copysign = std.math.copysign; +const ilogb = std.math.ilogb; +const inf = std.math.inf; +const isFinite = std.math.isFinite; +const isInf = std.math.isInf; +const isNan = std.math.isNan; +const isPositiveZero = std.math.isPositiveZero; +const scalbn = std.math.scalbn; + +/// computes floating point z*w where a_param, b_param are real, imaginary parts of z and c_param, d_param are real, imaginary parts of w +pub fn complexFloatMul(comptime T: type, a_param: T, b_param: T, c_param: T, d_param: T) [2]T { + var a = a_param; + var b = b_param; + var c = c_param; + var d = d_param; + + const ac = a * c; + const bd = b * d; + const ad = a * d; + const bc = b * c; + var x = ac - bd; + var y = ad + bc; + if (isNan(x) and isNan(y)) { + var recalc = false; + if (isInf(a) or isInf(b)) { + // lhs infinite + // Box the infinity and change NaNs in the other factor to 0 + a = copysign(if (isInf(a)) @as(T, 1.0) else @as(T, 0.0), a); + b = copysign(if (isInf(b)) @as(T, 1.0) else @as(T, 0.0), b); + if (isNan(c)) c = copysign(@as(T, 0.0), c); + if (isNan(d)) d = copysign(@as(T, 0.0), d); + recalc = true; + } + if (isInf(c) or isInf(d)) { + // rhs infinite + // Box the infinity and change NaNs in the other factor to 0 + c = copysign(if (isInf(c)) @as(T, 1.0) else @as(T, 0.0), c); + d = copysign(if (isInf(d)) @as(T, 1.0) else @as(T, 0.0), d); + if (isNan(a)) a = copysign(@as(T, 0.0), a); + if (isNan(b)) b = copysign(@as(T, 0.0), b); + recalc = true; + } + if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) { + // Recover infinities from overflow by changing NaN's to 0 + if (isNan(a)) a = copysign(@as(T, 0.0), a); + if (isNan(b)) b = copysign(@as(T, 0.0), b); + if (isNan(c)) c = copysign(@as(T, 0.0), c); + if (isNan(d)) d = copysign(@as(T, 0.0), d); + } + if (recalc) { + x = inf(T) * (a * c - b * d); + y = inf(T) * (a * d + b * c); + } + } + return .{ x, y }; +} + +/// computes floating point z / w where a_param, b_param are real, imaginary parts of z and c_param, d_param are real, imaginary parts of w +pub fn complexFloatDiv(comptime T: type, a_param: T, b_param: T, c_param: T, d_param: T) [2]T { + var a = a_param; + var b = b_param; + var c = c_param; + var d = d_param; + var denom_logb: i32 = 0; + const max_cd = @max(@abs(c), @abs(d)); + if (isFinite(max_cd)) { + if (max_cd == 0) { + denom_logb = std.math.minInt(i32) + 1; + c = 0; + d = 0; + } else { + denom_logb = ilogb(max_cd); + c = scalbn(c, -denom_logb); + d = scalbn(d, -denom_logb); + } + } + const denom = c * c + d * d; + var x = scalbn((a * c + b * d) / denom, -denom_logb); + var y = scalbn((b * c - a * d) / denom, -denom_logb); + if (isNan(x) and isNan(y)) { + if (isPositiveZero(denom) and (!isNan(a) or !isNan(b))) { + x = copysign(inf(T), c) * a; + y = copysign(inf(T), c) * b; + } else if ((isInf(a) or isInf(b)) and isFinite(c) and isFinite(d)) { + a = copysign(if (isInf(a)) @as(T, 1.0) else @as(T, 0.0), a); + b = copysign(if (isInf(b)) @as(T, 1.0) else @as(T, 0.0), b); + x = inf(T) * (a * c + b * d); + y = inf(T) * (b * c - a * d); + } else if (isInf(max_cd) and isFinite(a) and isFinite(b)) { + c = copysign(if (isInf(c)) @as(T, 1.0) else @as(T, 0.0), c); + d = copysign(if (isInf(d)) @as(T, 1.0) else @as(T, 0.0), d); + x = 0.0 * (a * c + b * d); + y = 0.0 * (b * c - a * d); + } + } + return .{ x, y }; +} + +test complexFloatMul { + // Naive algorithm would produce NaN + NaNi instead of inf + NaNi + const result = complexFloatMul(f64, inf(f64), 
std.math.nan(f64), 2, 0); + try std.testing.expect(isInf(result[0])); + try std.testing.expect(isNan(result[1])); +} + +test complexFloatDiv { + // Naive algorithm would produce NaN + NaNi instead of inf + NaNi + var result = complexFloatDiv(f64, inf(f64), std.math.nan(f64), 2, 0); + try std.testing.expect(isInf(result[0])); + try std.testing.expect(isNan(result[1])); + + result = complexFloatDiv(f64, 2.0, 2.0, 0.0, 0.0); + try std.testing.expect(isInf(result[0])); + try std.testing.expect(isInf(result[1])); +} diff --git a/lib/compiler/aro/aro/features.zig b/lib/compiler/aro/aro/features.zig index d66ba7cabc..fdc49b722b 100644 --- a/lib/compiler/aro/aro/features.zig +++ b/lib/compiler/aro/aro/features.zig @@ -45,7 +45,7 @@ pub fn hasFeature(comp: *Compilation, ext: []const u8) bool { .c_static_assert = comp.langopts.standard.atLeast(.c11), .c_thread_local = comp.langopts.standard.atLeast(.c11) and target_util.isTlsSupported(comp.target), }; - inline for (std.meta.fields(@TypeOf(list))) |f| { + inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| { if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name); } return false; @@ -69,7 +69,7 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool { .matrix_types = false, // TODO .matrix_types_scalar_division = false, // TODO }; - inline for (std.meta.fields(@TypeOf(list))) |f| { + inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| { if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name); } return false; diff --git a/lib/compiler/aro/aro/record_layout.zig b/lib/compiler/aro/aro/record_layout.zig index 2009a29bc9..da0517d9fc 100644 --- a/lib/compiler/aro/aro/record_layout.zig +++ b/lib/compiler/aro/aro/record_layout.zig @@ -19,6 +19,13 @@ const OngoingBitfield = struct { unused_size_bits: u64, }; +pub const Error = error{Overflow}; + +fn alignForward(addr: u64, alignment: u64) !u64 { + const forward_addr = try std.math.add(u64, addr, alignment - 1); + return std.mem.alignBackward(u64, forward_addr, alignment); +} + const SysVContext = struct { /// Does the record have an __attribute__((packed)) annotation. attr_packed: bool, @@ -36,14 +43,8 @@ const SysVContext = struct { comp: *const Compilation, fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext { - var pack_value: ?u64 = null; - if (pragma_pack) |pak| { - pack_value = pak * BITS_PER_BYTE; - } - var req_align: u29 = BITS_PER_BYTE; - if (ty.requestedAlignment(comp)) |aln| { - req_align = aln * BITS_PER_BYTE; - } + const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null; + const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; return SysVContext{ .attr_packed = ty.hasAttribute(.@"packed"), .max_field_align_bits = pack_value, @@ -55,7 +56,7 @@ const SysVContext = struct { }; } - fn layoutFields(self: *SysVContext, rec: *const Record) void { + fn layoutFields(self: *SysVContext, rec: *const Record) !void { for (rec.fields, 0..) 
|*fld, fld_indx| { if (fld.ty.specifier == .invalid) continue; const type_layout = computeLayout(fld.ty, self.comp); @@ -65,12 +66,12 @@ const SysVContext = struct { field_attrs = attrs[fld_indx]; } if (self.comp.target.isMinGW()) { - fld.layout = self.layoutMinGWField(fld, field_attrs, type_layout); + fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout); } else { if (fld.isRegularField()) { - fld.layout = self.layoutRegularField(field_attrs, type_layout); + fld.layout = try self.layoutRegularField(field_attrs, type_layout); } else { - fld.layout = self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth()); + fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth()); } } } @@ -99,8 +100,8 @@ const SysVContext = struct { field: *const Field, field_attrs: ?[]const Attribute, field_layout: TypeLayout, - ) FieldLayout { - const annotation_alignment_bits = BITS_PER_BYTE * (Type.annotationAlignment(self.comp, field_attrs) orelse 1); + ) !FieldLayout { + const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1)); const is_attr_packed = self.attr_packed or isPacked(field_attrs); const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout); @@ -157,7 +158,7 @@ const SysVContext = struct { field_alignment_bits: u64, is_named: bool, width: u64, - ) FieldLayout { + ) !FieldLayout { std.debug.assert(width <= ty_size_bits); // validated in parser // In a union, the size of the underlying type does not affect the size of the union. @@ -194,8 +195,8 @@ const SysVContext = struct { .unused_size_bits = ty_size_bits - width, }; } - const offset_bits = std.mem.alignForward(u64, self.size_bits, field_alignment_bits); - self.size_bits = if (width == 0) offset_bits else offset_bits + ty_size_bits; + const offset_bits = try alignForward(self.size_bits, field_alignment_bits); + self.size_bits = if (width == 0) offset_bits else try std.math.add(u64, offset_bits, ty_size_bits); if (!is_named) return .{}; return .{ .offset_bits = offset_bits, @@ -207,16 +208,16 @@ const SysVContext = struct { self: *SysVContext, ty_size_bits: u64, field_alignment_bits: u64, - ) FieldLayout { + ) !FieldLayout { self.ongoing_bitfield = null; // A struct field starts at the next offset in the struct that is properly // aligned with respect to the start of the struct. See test case 0033. // A union field always starts at offset 0. - const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, field_alignment_bits); + const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, field_alignment_bits); // Set the size of the record to the maximum of the current size and the end of // the field. See test case 0034. - self.size_bits = @max(self.size_bits, offset_bits + ty_size_bits); + self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, ty_size_bits)); return .{ .offset_bits = offset_bits, @@ -228,7 +229,7 @@ const SysVContext = struct { self: *SysVContext, fld_attrs: ?[]const Attribute, fld_layout: TypeLayout, - ) FieldLayout { + ) !FieldLayout { var fld_align_bits = fld_layout.field_alignment_bits; // If the struct or the field is packed, then the alignment of the underlying type is @@ -239,8 +240,8 @@ const SysVContext = struct { // The field alignment can be increased by __attribute__((aligned)) annotations on the // field. 
@@ -36,14 +43,8 @@ const SysVContext = struct {
     comp: *const Compilation,
 
     fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
-        var pack_value: ?u64 = null;
-        if (pragma_pack) |pak| {
-            pack_value = pak * BITS_PER_BYTE;
-        }
-        var req_align: u29 = BITS_PER_BYTE;
-        if (ty.requestedAlignment(comp)) |aln| {
-            req_align = aln * BITS_PER_BYTE;
-        }
+        const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null;
+        const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
 
         return SysVContext{
             .attr_packed = ty.hasAttribute(.@"packed"),
             .max_field_align_bits = pack_value,
@@ -55,7 +56,7 @@ const SysVContext = struct {
         };
     }
 
-    fn layoutFields(self: *SysVContext, rec: *const Record) void {
+    fn layoutFields(self: *SysVContext, rec: *const Record) !void {
         for (rec.fields, 0..) |*fld, fld_indx| {
             if (fld.ty.specifier == .invalid) continue;
             const type_layout = computeLayout(fld.ty, self.comp);
@@ -65,12 +66,12 @@ const SysVContext = struct {
                 field_attrs = attrs[fld_indx];
             }
             if (self.comp.target.isMinGW()) {
-                fld.layout = self.layoutMinGWField(fld, field_attrs, type_layout);
+                fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout);
             } else {
                 if (fld.isRegularField()) {
-                    fld.layout = self.layoutRegularField(field_attrs, type_layout);
+                    fld.layout = try self.layoutRegularField(field_attrs, type_layout);
                 } else {
-                    fld.layout = self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
+                    fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
                 }
             }
         }
@@ -99,8 +100,8 @@ const SysVContext = struct {
         field: *const Field,
         field_attrs: ?[]const Attribute,
         field_layout: TypeLayout,
-    ) FieldLayout {
-        const annotation_alignment_bits = BITS_PER_BYTE * (Type.annotationAlignment(self.comp, field_attrs) orelse 1);
+    ) !FieldLayout {
+        const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1));
         const is_attr_packed = self.attr_packed or isPacked(field_attrs);
         const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout);
@@ -157,7 +158,7 @@ const SysVContext = struct {
         field_alignment_bits: u64,
         is_named: bool,
         width: u64,
-    ) FieldLayout {
+    ) !FieldLayout {
         std.debug.assert(width <= ty_size_bits); // validated in parser
 
         // In a union, the size of the underlying type does not affect the size of the union.
@@ -194,8 +195,8 @@ const SysVContext = struct {
                 .unused_size_bits = ty_size_bits - width,
             };
         }
-        const offset_bits = std.mem.alignForward(u64, self.size_bits, field_alignment_bits);
-        self.size_bits = if (width == 0) offset_bits else offset_bits + ty_size_bits;
+        const offset_bits = try alignForward(self.size_bits, field_alignment_bits);
+        self.size_bits = if (width == 0) offset_bits else try std.math.add(u64, offset_bits, ty_size_bits);
         if (!is_named) return .{};
         return .{
             .offset_bits = offset_bits,
@@ -207,16 +208,16 @@ const SysVContext = struct {
         self: *SysVContext,
         ty_size_bits: u64,
         field_alignment_bits: u64,
-    ) FieldLayout {
+    ) !FieldLayout {
         self.ongoing_bitfield = null;
         // A struct field starts at the next offset in the struct that is properly
         // aligned with respect to the start of the struct. See test case 0033.
        // A union field always starts at offset 0.
-        const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, field_alignment_bits);
+        const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, field_alignment_bits);
 
         // Set the size of the record to the maximum of the current size and the end of
         // the field. See test case 0034.
-        self.size_bits = @max(self.size_bits, offset_bits + ty_size_bits);
+        self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, ty_size_bits));
 
         return .{
             .offset_bits = offset_bits,
@@ -228,7 +229,7 @@ const SysVContext = struct {
         self: *SysVContext,
         fld_attrs: ?[]const Attribute,
         fld_layout: TypeLayout,
-    ) FieldLayout {
+    ) !FieldLayout {
         var fld_align_bits = fld_layout.field_alignment_bits;
 
         // If the struct or the field is packed, then the alignment of the underlying type is
@@ -239,8 +240,8 @@ const SysVContext = struct {
 
         // The field alignment can be increased by __attribute__((aligned)) annotations on the
         // field. See test case 0085.
-        if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| {
-            fld_align_bits = @max(fld_align_bits, anno * BITS_PER_BYTE);
+        if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
+            fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE);
         }
 
         // #pragma pack takes precedence over all other attributes. See test cases 0084 and
@@ -251,12 +252,12 @@ const SysVContext = struct {
 
         // A struct field starts at the next offset in the struct that is properly
         // aligned with respect to the start of the struct.
-        const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, fld_align_bits);
+        const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, fld_align_bits);
         const size_bits = fld_layout.size_bits;
 
         // The alignment of a record is the maximum of its field alignments. See test cases
         // 0084, 0085, 0086.
-        self.size_bits = @max(self.size_bits, offset_bits + size_bits);
+        self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, size_bits));
         self.aligned_bits = @max(self.aligned_bits, fld_align_bits);
 
         return .{
@@ -271,7 +272,7 @@ const SysVContext = struct {
         fld_layout: TypeLayout,
         is_named: bool,
         bit_width: u64,
-    ) FieldLayout {
+    ) !FieldLayout {
         const ty_size_bits = fld_layout.size_bits;
         var ty_fld_algn_bits: u32 = fld_layout.field_alignment_bits;
 
@@ -301,7 +302,7 @@ const SysVContext = struct {
         const attr_packed = self.attr_packed or isPacked(fld_attrs);
         const has_packing_annotation = attr_packed or self.max_field_align_bits != null;
 
-        const annotation_alignment: u32 = if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| anno * BITS_PER_BYTE else 1;
+        const annotation_alignment = if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
 
         const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits;
         var field_align_bits: u64 = 1;
@@ -322,7 +323,7 @@ const SysVContext = struct {
         //   - the alignment of the type is larger than its size,
         // then it is aligned to the type's field alignment. See test case 0083.
         if (!has_packing_annotation) {
-            const start_bit = std.mem.alignForward(u64, first_unused_bit, field_align_bits);
+            const start_bit = try alignForward(first_unused_bit, field_align_bits);
             const does_field_cross_boundary = start_bit % ty_fld_algn_bits + bit_width > ty_size_bits;
 
@@ -349,8 +350,8 @@ const SysVContext = struct {
             }
         }
 
-        const offset_bits = std.mem.alignForward(u64, first_unused_bit, field_align_bits);
-        self.size_bits = @max(self.size_bits, offset_bits + bit_width);
+        const offset_bits = try alignForward(first_unused_bit, field_align_bits);
+        self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, bit_width));
 
         // Unnamed fields do not contribute to the record alignment except on a few targets.
         // See test case 0079.
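// Illustrative note (not part of this patch): the SysV rules above reduce to
// "align the offset forward, then grow the record". Hand-working
// struct { char c; int i; } in bits under those rules:
const std = @import("std");

test "sysv-style field placement" {
    var size_bits: u64 = 0;
    var aligned_bits: u64 = 8;

    // char c: size 8, alignment 8 -> offset 0.
    var offset = std.mem.alignForward(u64, size_bits, 8);
    try std.testing.expectEqual(@as(u64, 0), offset);
    size_bits = @max(size_bits, offset + 8);

    // int i: size 32, alignment 32 -> offset 32, leaving 24 bits of padding.
    offset = std.mem.alignForward(u64, size_bits, 32);
    try std.testing.expectEqual(@as(u64, 32), offset);
    size_bits = @max(size_bits, offset + 32);
    aligned_bits = @max(aligned_bits, 32);

    // The record size is then rounded up to the record alignment: 64 bits.
    try std.testing.expectEqual(@as(u64, 64), std.mem.alignForward(u64, size_bits, aligned_bits));
}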
@@ -419,10 +420,7 @@ const MsvcContext = struct {
 
         // The required alignment can be increased by adding a __declspec(align)
         // annotation. See test case 0023.
-        var must_align: u29 = BITS_PER_BYTE;
-        if (ty.requestedAlignment(comp)) |req_align| {
-            must_align = req_align * BITS_PER_BYTE;
-        }
+        const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
         return MsvcContext{
             .req_align_bits = must_align,
             .pointer_align_bits = must_align,
@@ -436,15 +434,15 @@ const MsvcContext = struct {
         };
     }
 
-    fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) FieldLayout {
+    fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout {
         const type_layout = computeLayout(fld.ty, self.comp);
 
         // The required alignment of the field is the maximum of the required alignment of the
         // underlying type and the __declspec(align) annotation on the field itself.
         // See test case 0028.
         var req_align = type_layout.required_alignment_bits;
-        if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| {
-            req_align = @max(anno * BITS_PER_BYTE, req_align);
+        if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
+            req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align);
         }
 
         // The required alignment of a record is the maximum of the required alignments of its
@@ -480,7 +478,7 @@ const MsvcContext = struct {
         }
     }
 
-    fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) FieldLayout {
+    fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) !FieldLayout {
         if (bit_width == 0) {
             // A zero-sized bit-field that does not follow a non-zero-sized bit-field does not affect
             // the overall layout of the record. Even in a union where the order would otherwise
@@ -522,7 +520,7 @@ const MsvcContext = struct {
                 self.pointer_align_bits = @max(self.pointer_align_bits, p_align);
                 self.field_align_bits = @max(self.field_align_bits, field_align);
 
-                const offset_bits = std.mem.alignForward(u64, self.size_bits, field_align);
+                const offset_bits = try alignForward(self.size_bits, field_align);
                 self.size_bits = if (bit_width == 0) offset_bits else offset_bits + ty_size_bits;
 
                 break :bits offset_bits;
@@ -534,7 +532,7 @@ const MsvcContext = struct {
         return .{ .offset_bits = offset_bits, .size_bits = bit_width };
     }
 
-    fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) FieldLayout {
+    fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) !FieldLayout {
         self.contains_non_bitfield = true;
         self.ongoing_bitfield = null;
         // The alignment of the field affects both the pointer alignment and the field
@@ -543,7 +541,7 @@ const MsvcContext = struct {
         self.field_align_bits = @max(self.field_align_bits, field_align);
         const offset_bits = switch (self.is_union) {
             true => 0,
-            false => std.mem.alignForward(u64, self.size_bits, field_align),
+            false => try alignForward(self.size_bits, field_align),
         };
         self.size_bits = @max(self.size_bits, offset_bits + size_bits);
         return .{ .offset_bits = offset_bits, .size_bits = size_bits };
@@ -569,14 +567,14 @@ const MsvcContext = struct {
     }
 };
 
-pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) void {
+pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void {
     switch (comp.langopts.emulate) {
         .gcc, .clang => {
             var context = SysVContext.init(ty, comp, pragma_pack);
-            context.layoutFields(rec);
+            try context.layoutFields(rec);
 
-            context.size_bits = std.mem.alignForward(u64, context.size_bits, context.aligned_bits);
+            context.size_bits = try alignForward(context.size_bits, context.aligned_bits);
 
             rec.type_layout = .{
                 .size_bits = context.size_bits,
@@ -594,7 +592,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
                     field_attrs = attrs[fld_indx];
                 }
 
-                fld.layout = context.layoutField(fld, field_attrs);
+                fld.layout = try context.layoutField(fld, field_attrs);
             }
             if (context.size_bits == 0) {
                 // As an extension, MSVC allows records that only contain zero-sized bitfields and empty
@@ -602,7 +600,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
                 // ensure that there are no zero-sized records.
                 context.handleZeroSizedRecord();
             }
-            context.size_bits = std.mem.alignForward(u64, context.size_bits, context.pointer_align_bits);
+            context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits);
             rec.type_layout = .{
                 .size_bits = context.size_bits,
                 .field_alignment_bits = context.field_align_bits,
diff --git a/lib/compiler/aro/aro/target.zig b/lib/compiler/aro/aro/target.zig
index 407ef4dfd0..7b2e1576bf 100644
--- a/lib/compiler/aro/aro/target.zig
+++ b/lib/compiler/aro/aro/target.zig
@@ -35,10 +35,7 @@ pub fn intMaxType(target: std.Target) Type {
 
 /// intptr_t for this target
 pub fn intPtrType(target: std.Target) Type {
-    switch (target.os.tag) {
-        .haiku => return .{ .specifier = .long },
-        else => {},
-    }
+    if (target.os.tag == .haiku) return .{ .specifier = .long };
 
     switch (target.cpu.arch) {
         .aarch64, .aarch64_be => switch (target.os.tag) {
@@ -127,6 +124,14 @@ pub fn int64Type(target: std.Target) Type {
     return .{ .specifier = .long_long };
 }
 
+pub fn float80Type(target: std.Target) ?Type {
+    switch (target.cpu.arch) {
+        .x86, .x86_64 => return .{ .specifier = .long_double },
+        else => {},
+    }
+    return null;
+}
+
 /// This function returns 1 if function alignment is not observable or settable.
 pub fn defaultFunctionAlignment(target: std.Target) u8 {
     return switch (target.cpu.arch) {
@@ -474,6 +479,7 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
         .kalimba,
         .lanai,
         .wasm32,
+        .spirv,
         .spirv32,
         .loongarch32,
         .dxil,
@@ -544,6 +550,7 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
         .powerpcle => copy.cpu.arch = .powerpc64le,
         .riscv32 => copy.cpu.arch = .riscv64,
         .sparc => copy.cpu.arch = .sparc64,
+        .spirv => copy.cpu.arch = .spirv64,
         .spirv32 => copy.cpu.arch = .spirv64,
         .thumb => copy.cpu.arch = .aarch64,
         .thumbeb => copy.cpu.arch = .aarch64_be,
@@ -599,6 +606,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .xtensa => "xtensa",
         .nvptx => "nvptx",
         .nvptx64 => "nvptx64",
+        .spirv => "spirv",
         .spirv32 => "spirv32",
         .spirv64 => "spirv64",
         .kalimba => "kalimba",
@@ -646,9 +654,10 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .ios => "ios",
         .tvos => "tvos",
         .watchos => "watchos",
-        .visionos => "xros",
         .driverkit => "driverkit",
         .shadermodel => "shadermodel",
+        .visionos => "xros",
+        .serenity => "serenity",
         .opencl,
         .opengl,
         .vulkan,
@@ -707,6 +716,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .callable => "callable",
         .mesh => "mesh",
         .amplification => "amplification",
+        .ohos => "openhos",
     };
     writer.writeAll(llvm_abi) catch unreachable;
     return stream.getWritten();
diff --git a/lib/compiler/aro/aro/text_literal.zig b/lib/compiler/aro/aro/text_literal.zig
index 1c5d592982..d9f6b2a88b 100644
--- a/lib/compiler/aro/aro/text_literal.zig
+++ b/lib/compiler/aro/aro/text_literal.zig
@@ -71,7 +71,7 @@ pub const Kind = enum {
     pub fn maxCodepoint(kind: Kind, comp: *const Compilation) u21 {
         return @intCast(switch (kind) {
             .char => std.math.maxInt(u7),
-            .wide => @min(0x10FFFF, comp.types.wchar.maxInt(comp)),
+            .wide => @min(0x10FFFF, comp.wcharMax()),
             .utf_8 => std.math.maxInt(u7),
             .utf_16 => std.math.maxInt(u16),
             .utf_32 => 0x10FFFF,
@@ -83,7 +83,7 @@ pub const Kind = enum {
     pub fn maxInt(kind: Kind, comp: *const Compilation) u32 {
         return @intCast(switch (kind) {
             .char, .utf_8 => std.math.maxInt(u8),
-            .wide => comp.types.wchar.maxInt(comp),
+            .wide => comp.wcharMax(),
             .utf_16 => std.math.maxInt(u16),
             .utf_32 => std.math.maxInt(u32),
             .unterminated => unreachable,
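// Illustrative note (not part of this patch): the maxCodepoint change above
// clamps wide literals to the last Unicode codepoint, so the result depends
// only on how wide wchar_t is (16-bit on Windows-like targets, 32-bit on
// most others). Sketch with a hypothetical helper:
const std = @import("std");

fn wideMaxCodepoint(wchar_max: u32) u21 {
    return @intCast(@min(0x10FFFF, wchar_max));
}

test wideMaxCodepoint {
    try std.testing.expectEqual(@as(u21, 0xFFFF), wideMaxCodepoint(std.math.maxInt(u16)));
    try std.testing.expectEqual(@as(u21, 0x10FFFF), wideMaxCodepoint(std.math.maxInt(u32)));
}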
diff --git a/lib/compiler/aro/aro/toolchains/Linux.zig b/lib/compiler/aro/aro/toolchains/Linux.zig
index 36ab916b10..a7d8c71bef 100644
--- a/lib/compiler/aro/aro/toolchains/Linux.zig
+++ b/lib/compiler/aro/aro/toolchains/Linux.zig
@@ -423,7 +423,7 @@ test Linux {
     defer arena_instance.deinit();
     const arena = arena_instance.allocator();
 
-    var comp = Compilation.init(std.testing.allocator);
+    var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
     defer comp.deinit();
     comp.environment = .{
         .path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
diff --git a/lib/compiler/aro/backend/Interner.zig b/lib/compiler/aro/backend/Interner.zig
index 45b6e51fd9..631ec8ee16 100644
--- a/lib/compiler/aro/backend/Interner.zig
+++ b/lib/compiler/aro/backend/Interner.zig
@@ -34,6 +34,7 @@ const KeyAdapter = struct {
 pub const Key = union(enum) {
     int_ty: u16,
     float_ty: u16,
+    complex_ty: u16,
     ptr_ty,
     noreturn_ty,
     void_ty,
@@ -62,6 +63,7 @@ pub const Key = union(enum) {
         }
     },
     float: Float,
+    complex: Complex,
     bytes: []const u8,
 
     pub const Float = union(enum) {
@@ -71,6 +73,13 @@ pub const Key = union(enum) {
         f80: f80,
         f128: f128,
     };
+    pub const Complex = union(enum) {
+        cf16: [2]f16,
+        cf32: [2]f32,
+        cf64: [2]f64,
+        cf80: [2]f80,
+        cf128: [2]f128,
+    };
 
     pub fn hash(key: Key) u32 {
         var hasher = Hash.init(0);
@@ -89,6 +98,12 @@ pub const Key = union(enum) {
                     @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
                 ),
             },
+            .complex => |repr| switch (repr) {
+                inline else => |data| std.hash.autoHash(
+                    &hasher,
+                    @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
+                ),
+            },
             .int => |repr| {
                 var space: Tag.Int.BigIntSpace = undefined;
                 const big = repr.toBigInt(&space);
@@ -154,6 +169,14 @@ pub const Key = union(enum) {
                 128 => return .f128,
                 else => unreachable,
             },
+            .complex_ty => |bits| switch (bits) {
+                16 => return .cf16,
+                32 => return .cf32,
+                64 => return .cf64,
+                80 => return .cf80,
+                128 => return .cf128,
+                else => unreachable,
+            },
             .ptr_ty => return .ptr,
             .func_ty => return .func,
             .noreturn_ty => return .noreturn,
@@ -199,6 +222,11 @@ pub const Ref = enum(u32) {
     zero = max - 16,
     one = max - 17,
     null = max - 18,
+    cf16 = max - 19,
+    cf32 = max - 20,
+    cf64 = max - 21,
+    cf80 = max - 22,
+    cf128 = max - 23,
     _,
 };
 
@@ -224,6 +252,11 @@ pub const OptRef = enum(u32) {
     zero = max - 16,
     one = max - 17,
     null = max - 18,
+    cf16 = max - 19,
+    cf32 = max - 20,
+    cf64 = max - 21,
+    cf80 = max - 22,
+    cf128 = max - 23,
     _,
 };
 
@@ -232,6 +265,8 @@ pub const Tag = enum(u8) {
     int_ty,
     /// `data` is `u16`
     float_ty,
+    /// `data` is `u16`
+    complex_ty,
     /// `data` is index to `Array`
     array_ty,
     /// `data` is index to `Vector`
@@ -254,6 +289,16 @@ pub const Tag = enum(u8) {
     f80,
     /// `data` is `F128`
     f128,
+    /// `data` is `CF16`
+    cf16,
+    /// `data` is `CF32`
+    cf32,
+    /// `data` is `CF64`
+    cf64,
+    /// `data` is `CF80`
+    cf80,
+    /// `data` is `CF128`
+    cf128,
     /// `data` is `Bytes`
     bytes,
     /// `data` is `Record`
@@ -354,6 +399,134 @@ pub const Tag = enum(u8) {
         }
     };
 
+    pub const CF16 = struct {
+        piece0: u32,
+
+        pub fn get(self: CF16) [2]f16 {
+            const real: f16 = @bitCast(@as(u16, @truncate(self.piece0 >> 16)));
+            const imag: f16 = @bitCast(@as(u16, @truncate(self.piece0)));
+            return .{
+                real,
+                imag,
+            };
+        }
+
+        fn pack(val: [2]f16) CF16 {
+            const real: u16 = @bitCast(val[0]);
+            const imag: u16 = @bitCast(val[1]);
+            return .{
+                .piece0 = (@as(u32, real) << 16) | @as(u32, imag),
+            };
+        }
+    };
+
+    pub const CF32 = struct {
+        piece0: u32,
+        piece1: u32,
+
+        pub fn get(self: CF32) [2]f32 {
+            return .{
+                @bitCast(self.piece0),
+                @bitCast(self.piece1),
+            };
+        }
+
+        fn pack(val: [2]f32) CF32 {
+            return .{
+                .piece0 = @bitCast(val[0]),
+                .piece1 = @bitCast(val[1]),
+            };
+        }
+    };
+
+    pub const CF64 = struct {
+        piece0: u32,
+        piece1: u32,
+        piece2: u32,
+        piece3: u32,
+
+        pub fn get(self: CF64) [2]f64 {
+            return .{
+                (F64{ .piece0 = self.piece0, .piece1 = self.piece1 }).get(),
+                (F64{ .piece0 = self.piece2, .piece1 = self.piece3 }).get(),
+            };
+        }
+
+        fn pack(val: [2]f64) CF64 {
+            const real = F64.pack(val[0]);
+            const imag = F64.pack(val[1]);
+            return .{
+                .piece0 = real.piece0,
+                .piece1 = real.piece1,
+                .piece2 = imag.piece0,
+                .piece3 = imag.piece1,
+            };
+        }
+    };
+
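// Illustrative note (not part of this patch): the CF* helpers split float
// pairs into u32 pieces so they fit the interner's extra array. A
// self-contained round trip of the CF16 scheme (two f16 in one u32), with
// hypothetical names:
const std = @import("std");

fn packCf16(val: [2]f16) u32 {
    const real: u16 = @bitCast(val[0]);
    const imag: u16 = @bitCast(val[1]);
    return (@as(u32, real) << 16) | imag;
}

fn unpackCf16(piece0: u32) [2]f16 {
    const real: f16 = @bitCast(@as(u16, @truncate(piece0 >> 16)));
    const imag: f16 = @bitCast(@as(u16, @truncate(piece0)));
    return .{ real, imag };
}

test "cf16 round trip" {
    const val: [2]f16 = .{ 1.5, -0.25 };
    const out = unpackCf16(packCf16(val));
    try std.testing.expectEqual(val[0], out[0]);
    try std.testing.expectEqual(val[1], out[1]);
}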
+    /// TODO pack into 5 pieces
+    pub const CF80 = struct {
+        piece0: u32,
+        piece1: u32,
+        piece2: u32, // u16 part, top bits
+        piece3: u32,
+        piece4: u32,
+        piece5: u32, // u16 part, top bits
+
+        pub fn get(self: CF80) [2]f80 {
+            return .{
+                (F80{ .piece0 = self.piece0, .piece1 = self.piece1, .piece2 = self.piece2 }).get(),
+                (F80{ .piece0 = self.piece3, .piece1 = self.piece4, .piece2 = self.piece5 }).get(),
+            };
+        }
+
+        fn pack(val: [2]f80) CF80 {
+            const real = F80.pack(val[0]);
+            const imag = F80.pack(val[1]);
+            return .{
+                .piece0 = real.piece0,
+                .piece1 = real.piece1,
+                .piece2 = real.piece2,
+                .piece3 = imag.piece0,
+                .piece4 = imag.piece1,
+                .piece5 = imag.piece2,
+            };
+        }
+    };
+
+    pub const CF128 = struct {
+        piece0: u32,
+        piece1: u32,
+        piece2: u32,
+        piece3: u32,
+        piece4: u32,
+        piece5: u32,
+        piece6: u32,
+        piece7: u32,
+
+        pub fn get(self: CF128) [2]f128 {
+            return .{
+                (F128{ .piece0 = self.piece0, .piece1 = self.piece1, .piece2 = self.piece2, .piece3 = self.piece3 }).get(),
+                (F128{ .piece0 = self.piece4, .piece1 = self.piece5, .piece2 = self.piece6, .piece3 = self.piece7 }).get(),
+            };
+        }
+
+        fn pack(val: [2]f128) CF128 {
+            const real = F128.pack(val[0]);
+            const imag = F128.pack(val[1]);
+            return .{
+                .piece0 = real.piece0,
+                .piece1 = real.piece1,
+                .piece2 = real.piece2,
+                .piece3 = real.piece3,
+                .piece4 = imag.piece0,
+                .piece5 = imag.piece1,
+                .piece6 = imag.piece2,
+                .piece7 = imag.piece3,
+            };
+        }
+    };
+
     pub const Bytes = struct {
         strings_index: u32,
         len: u32,
@@ -407,6 +580,12 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
                 .data = bits,
             });
         },
+        .complex_ty => |bits| {
+            i.items.appendAssumeCapacity(.{
+                .tag = .complex_ty,
+                .data = bits,
+            });
+        },
        .array_ty => |info| {
             const split_len = PackedU64.init(info.len);
             i.items.appendAssumeCapacity(.{
@@ -493,6 +672,28 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
                 .data = try i.addExtra(gpa, Tag.F128.pack(data)),
             }),
         },
+        .complex => |repr| switch (repr) {
+            .cf16 => |data| i.items.appendAssumeCapacity(.{
+                .tag = .cf16,
+                .data = try i.addExtra(gpa, Tag.CF16.pack(data)),
+            }),
+            .cf32 => |data| i.items.appendAssumeCapacity(.{
+                .tag = .cf32,
+                .data = try i.addExtra(gpa, Tag.CF32.pack(data)),
+            }),
+            .cf64 => |data| i.items.appendAssumeCapacity(.{
+                .tag = .cf64,
+                .data = try i.addExtra(gpa, Tag.CF64.pack(data)),
+            }),
+            .cf80 => |data| i.items.appendAssumeCapacity(.{
+                .tag = .cf80,
+                .data = try i.addExtra(gpa, Tag.CF80.pack(data)),
+            }),
+            .cf128 => |data| i.items.appendAssumeCapacity(.{
+                .tag = .cf128,
+                .data = try i.addExtra(gpa, Tag.CF128.pack(data)),
+            }),
+        },
         .bytes => |bytes| {
             const strings_index: u32 = @intCast(i.strings.items.len);
             try i.strings.appendSlice(gpa, bytes);
@@ -564,6 +765,10 @@ pub fn get(i: *const Interner, ref: Ref) Key {
         .zero => return .{ .int = .{ .u64 = 0 } },
         .one => return .{ .int = .{ .u64 = 1 } },
         .null => return .null,
+        .cf16 => return .{ .complex_ty = 16 },
+        .cf32 => return .{ .complex_ty = 32 },
+        .cf64 => return .{ .complex_ty = 64 },
+        .cf80 => return .{ .complex_ty = 80 },
         else => {},
     }
 
@@ -572,6 +777,7 @@ pub fn get(i: *const Interner, ref: Ref) Key {
     return switch (item.tag) {
         .int_ty => .{ .int_ty = @intCast(data) },
         .float_ty => .{ .float_ty = @intCast(data) },
+        .complex_ty => .{ .complex_ty = @intCast(data) },
         .array_ty => {
             const array_ty = i.extraData(Tag.Array, data);
             return .{ .array_ty = .{
@@ -612,6 +818,26 @@ pub fn get(i: *const Interner, ref: Ref) Key {
             const float = i.extraData(Tag.F128, data);
             return .{ .float = .{ .f128 = float.get() } };
         },
+        .cf16 => {
+            const components = i.extraData(Tag.CF16, data);
+            return .{ .complex = .{ .cf16 = components.get() } };
+        },
+        .cf32 => {
+            const components = i.extraData(Tag.CF32, data);
+            return .{ .complex = .{ .cf32 = components.get() } };
+        },
+        .cf64 => {
+            const components = i.extraData(Tag.CF64, data);
+            return .{ .complex = .{ .cf64 = components.get() } };
+        },
+        .cf80 => {
+            const components = i.extraData(Tag.CF80, data);
+            return .{ .complex = .{ .cf80 = components.get() } };
+        },
+        .cf128 => {
+            const components = i.extraData(Tag.CF128, data);
+            return .{ .complex = .{ .cf128 = components.get() } };
+        },
         .bytes => {
             const bytes = i.extraData(Tag.Bytes, data);
             return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] };
diff --git a/lib/compiler/aro/backend/Ir.zig b/lib/compiler/aro/backend/Ir.zig
index 15c153e8f1..e694a23c9a 100644
--- a/lib/compiler/aro/backend/Ir.zig
+++ b/lib/compiler/aro/backend/Ir.zig
@@ -37,6 +37,7 @@ pub const Builder = struct {
         for (b.decls.values()) |*decl| {
             decl.deinit(b.gpa);
         }
+        b.decls.deinit(b.gpa);
         b.arena.deinit();
         b.instructions.deinit(b.gpa);
         b.body.deinit(b.gpa);
diff --git a/lib/compiler/aro/backend/Object.zig b/lib/compiler/aro/backend/Object.zig
index b42ad4bdcb..98355e88b6 100644
--- a/lib/compiler/aro/backend/Object.zig
+++ b/lib/compiler/aro/backend/Object.zig
@@ -16,7 +16,7 @@ pub fn create(gpa: Allocator, target: std.Target) !*Object {
 
 pub fn deinit(obj: *Object) void {
     switch (obj.format) {
-        .elf => @as(*Elf, @fieldParentPtr("obj", obj)).deinit(),
+        .elf => @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).deinit(),
         else => unreachable,
     }
 }
@@ -32,7 +32,7 @@ pub const Section = union(enum) {
 
 pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) {
     switch (obj.format) {
-        .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).getSection(section),
+        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).getSection(section),
         else => unreachable,
     }
 }
@@ -53,21 +53,21 @@ pub fn declareSymbol(
     size: u64,
 ) ![]const u8 {
     switch (obj.format) {
-        .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).declareSymbol(section, name, linkage, @"type", offset, size),
+        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).declareSymbol(section, name, linkage, @"type", offset, size),
         else => unreachable,
     }
 }
 
 pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address: u64, addend: i64) !void {
     switch (obj.format) {
-        .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).addRelocation(name, section, address, addend),
+        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).addRelocation(name, section, address, addend),
         else => unreachable,
     }
 }
 
 pub fn finish(obj: *Object, file: std.fs.File) !void {
     switch (obj.format) {
-        .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).finish(file),
+        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(file),
         else => unreachable,
     }
 }
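// Illustrative note (not part of this patch): the @alignCast additions above
// are needed because @fieldParentPtr infers its result from a field pointer
// whose alignment can be smaller than the parent's, so the cast back to
// *Elf re-asserts the parent's alignment. The pattern, with stand-in types:
const std = @import("std");

const Obj = struct { format: enum { elf } };
const Elf = struct {
    obj: Obj,
    data: u64,
};

test "fieldParentPtr with alignCast" {
    var elf = Elf{ .obj = .{ .format = .elf }, .data = 0 };
    elf.data = 42;
    const obj = &elf.obj;
    const parent = @as(*Elf, @alignCast(@fieldParentPtr("obj", obj)));
    try std.testing.expectEqual(@as(u64, 42), parent.data);
}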
diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig
index 692786c2be..4255989416 100644
--- a/lib/compiler/aro_translate_c.zig
+++ b/lib/compiler/aro_translate_c.zig
@@ -731,7 +731,6 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH
         .float => return ZigTag.type.create(c.arena, "f32"),
         .double => return ZigTag.type.create(c.arena, "f64"),
         .long_double => return ZigTag.type.create(c.arena, "c_longdouble"),
-        .float80 => return ZigTag.type.create(c.arena, "f80"),
         .float128 => return ZigTag.type.create(c.arena, "f128"),
         .@"enum" => {
             const enum_decl = ty.data.@"enum";
@@ -1799,7 +1798,7 @@ pub fn main() !void {
 
     const args = try std.process.argsAlloc(arena);
 
-    var aro_comp = aro.Compilation.init(gpa);
+    var aro_comp = aro.Compilation.init(gpa, std.fs.cwd());
     defer aro_comp.deinit();
 
     var tree = translate(gpa, &aro_comp, args) catch |err| switch (err) {
diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig
index c09801096c..4159ad03e3 100644
--- a/lib/compiler/resinator/main.zig
+++ b/lib/compiler/resinator/main.zig
@@ -126,7 +126,7 @@ pub fn main() !void {
     defer aro_arena_state.deinit();
     const aro_arena = aro_arena_state.allocator();
 
-    var comp = aro.Compilation.init(aro_arena);
+    var comp = aro.Compilation.init(aro_arena, std.fs.cwd());
     defer comp.deinit();
 
     var argv = std.ArrayList([]const u8).init(comp.gpa);
diff --git a/lib/compiler/resinator/preprocess.zig b/lib/compiler/resinator/preprocess.zig
index 3d6912a7c0..c5b4b1edea 100644
--- a/lib/compiler/resinator/preprocess.zig
+++ b/lib/compiler/resinator/preprocess.zig
@@ -59,7 +59,7 @@ pub fn preprocess(
 
     if (hasAnyErrors(comp)) return error.PreprocessError;
 
-    try pp.prettyPrintTokens(writer);
+    try pp.prettyPrintTokens(writer, .result_only);
 
     if (maybe_dependencies_list) |dependencies_list| {
         for (comp.sources.values()) |comp_source| {
