From 82236a502959d39b97c783dc409a9a5b720e6e8b Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 20 Dec 2020 14:50:48 +0100 Subject: stage2 ARM: implement basic binary bitwise operations --- src/codegen.zig | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 3b0a383a71..01deb13c0e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -764,6 +764,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arg => return self.genArg(inst.castTag(.arg).?), .assembly => return self.genAsm(inst.castTag(.assembly).?), .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + .bitand => return self.genBitAnd(inst.castTag(.bitand).?), + .bitor => return self.genBitOr(inst.castTag(.bitor).?), .block => return self.genBlock(inst.castTag(.block).?), .br => return self.genBr(inst.castTag(.br).?), .breakpoint => return self.genBreakpoint(inst.src), @@ -799,6 +801,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .unwrap_optional => return self.genUnwrapOptional(inst.castTag(.unwrap_optional).?), .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), .varptr => return self.genVarPtr(inst.castTag(.varptr).?), + .xor => return self.genXor(inst.castTag(.xor).?), } } @@ -1009,6 +1012,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + fn genBitAnd(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + // No side effects, so if it's unreferenced, do nothing. + if (inst.base.isUnused()) + return MCValue.dead; + switch (arch) { + .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bitand), + else => return self.fail(inst.base.src, "TODO implement bitwise and for {}", .{self.target.cpu.arch}), + } + } + + fn genBitOr(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + // No side effects, so if it's unreferenced, do nothing. + if (inst.base.isUnused()) + return MCValue.dead; + switch (arch) { + .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bitor), + else => return self.fail(inst.base.src, "TODO implement bitwise or for {}", .{self.target.cpu.arch}), + } + } + + fn genXor(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + // No side effects, so if it's unreferenced, do nothing. + if (inst.base.isUnused()) + return MCValue.dead; + switch (arch) { + .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .xor), + else => return self.fail(inst.base.src, "TODO implement xor for {}", .{self.target.cpu.arch}), + } + } + fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
if (inst.base.isUnused()) @@ -1251,13 +1284,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, dst_reg, operand).toU32()); } }, - .booland => { + .booland, .bitand => { writeInt(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, dst_reg, operand).toU32()); }, - .boolor => { + .boolor, .bitor => { writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, dst_reg, operand).toU32()); }, - .not => { + .not, .xor => { writeInt(u32, try self.code.addManyAsArray(4), Instruction.eor(.al, dst_reg, dst_reg, operand).toU32()); }, else => unreachable, // not a binary instruction -- cgit v1.2.3 From 133da8692e80532797dd91b32539cf2175280a95 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 30 Dec 2020 19:57:11 -0700 Subject: stage2: rework Type Payload layout Add `Type.castTag` and note that it is preferable to call than `Type.cast`. This matches other abstractions in the codebase. Added a convenience function `Type.Tag.create` which really cleans up the callsites of creating `Type` objects. `Type` payloads can now share types. This is in preparation for another improvement that I want to do. --- src/Compilation.zig | 8 +- src/Module.zig | 127 +++++------ src/astgen.zig | 2 +- src/codegen.zig | 2 +- src/type.zig | 613 +++++++++++++++++++++++++++++----------------------- src/value.zig | 35 ++- src/zir.zig | 2 +- src/zir_sema.zig | 30 +-- 8 files changed, 424 insertions(+), 395 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 8a0a6ee58d..11c8303fac 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -825,9 +825,11 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { const root_scope = rs: { if (mem.endsWith(u8, root_pkg.root_src_path, ".zig")) { - const struct_payload = try gpa.create(Type.Payload.EmptyStruct); const root_scope = try gpa.create(Module.Scope.File); - struct_payload.* = .{ .scope = &root_scope.root_container }; + const struct_ty = try Type.Tag.empty_struct.create( + gpa, + &root_scope.root_container, + ); root_scope.* = .{ // TODO this is duped so it can be freed in Container.deinit .sub_file_path = try gpa.dupe(u8, root_pkg.root_src_path), @@ -838,7 +840,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { .root_container = .{ .file_scope = root_scope, .decls = .{}, - .ty = Type.initPayload(&struct_payload.base), + .ty = struct_ty, }, }; break :rs &root_scope.base; diff --git a/src/Module.zig b/src/Module.zig index 089bc51893..3e937fe49b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -562,7 +562,7 @@ pub const Scope = struct { pub fn deinit(self: *Container, gpa: *Allocator) void { self.decls.deinit(gpa); // TODO either Container of File should have an arena for sub_file_path and ty - gpa.destroy(self.ty.cast(Type.Payload.EmptyStruct).?); + gpa.destroy(self.ty.castTag(.empty_struct).?); gpa.free(self.file_scope.sub_file_path); self.* = undefined; } @@ -2528,12 +2528,11 @@ pub fn analyzeImport(self: *Module, scope: *Scope, src: usize, target_string: [] } // TODO Scope.Container arena for ty and sub_file_path - const struct_payload = try self.gpa.create(Type.Payload.EmptyStruct); - errdefer self.gpa.destroy(struct_payload); const file_scope = try self.gpa.create(Scope.File); errdefer self.gpa.destroy(file_scope); + const struct_ty = try Type.Tag.empty_struct.create(self.gpa, &file_scope.root_container); + errdefer 
self.gpa.destroy(struct_ty.castTag(.empty_struct).?); - struct_payload.* = .{ .scope = &file_scope.root_container }; file_scope.* = .{ .sub_file_path = resolved_path, .source = .{ .unloaded = {} }, @@ -2543,7 +2542,7 @@ pub fn analyzeImport(self: *Module, scope: *Scope, src: usize, target_string: [] .root_container = .{ .file_scope = file_scope, .decls = .{}, - .ty = Type.initPayload(&struct_payload.base), + .ty = struct_ty, }, }; self.analyzeContainer(&file_scope.root_container) catch |err| switch (err) { @@ -2564,7 +2563,7 @@ pub fn cmpNumeric( lhs: *Inst, rhs: *Inst, op: std.math.CompareOperator, -) !*Inst { +) InnerError!*Inst { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -2738,15 +2737,14 @@ fn wrapOptional(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*In } fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type { - if (signed) { - const int_payload = try scope.arena().create(Type.Payload.IntSigned); - int_payload.* = .{ .bits = bits }; - return Type.initPayload(&int_payload.base); - } else { - const int_payload = try scope.arena().create(Type.Payload.IntUnsigned); - int_payload.* = .{ .bits = bits }; - return Type.initPayload(&int_payload.base); - } + const int_payload = try scope.arena().create(Type.Payload.Bits); + int_payload.* = .{ + .base = .{ + .tag = if (signed) .int_signed else .int_unsigned, + }, + .data = bits, + }; + return Type.initPayload(&int_payload.base); } pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Type { @@ -2829,7 +2827,7 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst // T to ?T if (dest_type.zigTypeTag() == .Optional) { - var buf: Type.Payload.PointerSimple = undefined; + var buf: Type.Payload.ElemType = undefined; const child_type = dest_type.optionalChild(&buf); if (child_type.eql(inst.ty)) { return self.wrapOptional(scope, dest_type, inst); @@ -3225,7 +3223,7 @@ pub fn simplePtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type, mu // TODO stage1 type inference bug const T = Type.Tag; - const type_payload = try scope.arena().create(Type.Payload.PointerSimple); + const type_payload = try scope.arena().create(Type.Payload.ElemType); type_payload.* = .{ .base = .{ .tag = switch (size) { @@ -3235,7 +3233,7 @@ pub fn simplePtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type, mu .Slice => if (mutable) T.mut_slice else T.const_slice, }, }, - .pointee_type = elem_ty, + .data = elem_ty, }; return Type.initPayload(&type_payload.base); } @@ -3257,8 +3255,7 @@ pub fn ptrType( assert(host_size == 0 or bit_offset < host_size * 8); // TODO check if type can be represented by simplePtrType - const type_payload = try scope.arena().create(Type.Payload.Pointer); - type_payload.* = .{ + return Type.Tag.pointer.create(scope.arena(), .{ .pointee_type = elem_ty, .sentinel = sentinel, .@"align" = @"align", @@ -3268,95 +3265,73 @@ pub fn ptrType( .mutable = mutable, .@"volatile" = @"volatile", .size = size, - }; - return Type.initPayload(&type_payload.base); + }); } pub fn optionalType(self: *Module, scope: *Scope, child_type: Type) Allocator.Error!Type { - return Type.initPayload(switch (child_type.tag()) { - .single_const_pointer => blk: { - const payload = try scope.arena().create(Type.Payload.PointerSimple); - payload.* = .{ - .base = .{ .tag = .optional_single_const_pointer }, - .pointee_type = child_type.elemType(), - }; - break :blk &payload.base; - }, - .single_mut_pointer => blk: { - const payload = try 
scope.arena().create(Type.Payload.PointerSimple); - payload.* = .{ - .base = .{ .tag = .optional_single_mut_pointer }, - .pointee_type = child_type.elemType(), - }; - break :blk &payload.base; - }, - else => blk: { - const payload = try scope.arena().create(Type.Payload.Optional); - payload.* = .{ - .child_type = child_type, - }; - break :blk &payload.base; - }, - }); + switch (child_type.tag()) { + .single_const_pointer => return Type.Tag.optional_single_const_pointer.create( + scope.arena(), + child_type.elemType(), + ), + .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create( + scope.arena(), + child_type.elemType(), + ), + else => return Type.Tag.optional.create(scope.arena(), child_type), + } } -pub fn arrayType(self: *Module, scope: *Scope, len: u64, sentinel: ?Value, elem_type: Type) Allocator.Error!Type { +pub fn arrayType( + self: *Module, + scope: *Scope, + len: u64, + sentinel: ?Value, + elem_type: Type, +) Allocator.Error!Type { if (elem_type.eql(Type.initTag(.u8))) { if (sentinel) |some| { if (some.eql(Value.initTag(.zero))) { - const payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0); - payload.* = .{ - .len = len, - }; - return Type.initPayload(&payload.base); + return Type.Tag.array_u8_sentinel_0.create(scope.arena(), len); } } else { - const payload = try scope.arena().create(Type.Payload.Array_u8); - payload.* = .{ - .len = len, - }; - return Type.initPayload(&payload.base); + return Type.Tag.array_u8.create(scope.arena(), len); } } if (sentinel) |some| { - const payload = try scope.arena().create(Type.Payload.ArraySentinel); - payload.* = .{ + return Type.Tag.array_sentinel.create(scope.arena(), .{ .len = len, .sentinel = some, .elem_type = elem_type, - }; - return Type.initPayload(&payload.base); + }); } - const payload = try scope.arena().create(Type.Payload.Array); - payload.* = .{ + return Type.Tag.array.create(scope.arena(), .{ .len = len, .elem_type = elem_type, - }; - return Type.initPayload(&payload.base); + }); } -pub fn errorUnionType(self: *Module, scope: *Scope, error_set: Type, payload: Type) Allocator.Error!Type { +pub fn errorUnionType( + self: *Module, + scope: *Scope, + error_set: Type, + payload: Type, +) Allocator.Error!Type { assert(error_set.zigTypeTag() == .ErrorSet); if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) { return Type.initTag(.anyerror_void_error_union); } - const result = try scope.arena().create(Type.Payload.ErrorUnion); - result.* = .{ + return Type.Tag.error_union.create(scope.arena(), .{ .error_set = error_set, .payload = payload, - }; - return Type.initPayload(&result.base); + }); } pub fn anyframeType(self: *Module, scope: *Scope, return_type: Type) Allocator.Error!Type { - const result = try scope.arena().create(Type.Payload.AnyFrame); - result.* = .{ - .return_type = return_type, - }; - return Type.initPayload(&result.base); + return Type.Tag.anyframe_T.create(scope.arena(), return_type); } pub fn dumpInst(self: *Module, scope: *Scope, inst: *Inst) void { diff --git a/src/astgen.zig b/src/astgen.zig index 672fe343e2..1fc8a0d19e 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -2723,7 +2723,7 @@ fn rlWrap(mod: *Module, scope: *Scope, rl: ResultLoc, result: *zir.Inst) InnerEr return mod.fail(scope, result.src, "TODO implement rlWrap .bitcasted_ptr", .{}); }, .inferred_ptr => |alloc| { - return mod.fail(scope, result.src, "TODO implement rlWrap .inferred_ptr", .{}); + return addZIRBinOp(mod, scope, result.src, .store, &alloc.base, result); }, .block_ptr => 
|block_ptr| { return mod.fail(scope, result.src, "TODO implement rlWrap .block_ptr", .{}); diff --git a/src/codegen.zig b/src/codegen.zig index 01deb13c0e..d98a87a440 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3262,7 +3262,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (typed_value.val.isNull()) return MCValue{ .immediate = 0 }; - var buf: Type.Payload.PointerSimple = undefined; + var buf: Type.Payload.ElemType = undefined; return self.genTypedValue(src, .{ .ty = typed_value.ty.optionalChild(&buf), .val = typed_value.val, diff --git a/src/type.zig b/src/type.zig index f08408738a..ce237f89c7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -112,18 +112,39 @@ pub const Type = extern union { } } + /// Prefer `castTag` to this. pub fn cast(self: Type, comptime T: type) ?*T { - if (self.tag_if_small_enough < Tag.no_payload_count) + if (@hasField(T, "base_tag")) { + return base.castTag(T.base_tag); + } + if (self.tag_if_small_enough < Tag.no_payload_count) { return null; + } + inline for (@typeInfo(Tag).Enum.fields) |field| { + if (field.value < Tag.no_payload_count) + continue; + const t = @intToEnum(Tag, field.value); + if (self.ptr_otherwise.tag == t) { + if (T == t.Type()) { + return @fieldParentPtr(T, "base", self.ptr_otherwise); + } + return null; + } + } + unreachable; + } - const expected_tag = std.meta.fieldInfo(T, "base").default_value.?.tag; - if (self.ptr_otherwise.tag != expected_tag) + pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { + if (self.tag_if_small_enough < Tag.no_payload_count) return null; - return @fieldParentPtr(T, "base", self.ptr_otherwise); + if (self.ptr_otherwise.tag == t) + return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); + + return null; } - pub fn castPointer(self: Type) ?*Payload.PointerSimple { + pub fn castPointer(self: Type) ?*Payload.ElemType { return switch (self.tag()) { .single_const_pointer, .single_mut_pointer, @@ -135,7 +156,8 @@ pub const Type = extern union { .mut_slice, .optional_single_const_pointer, .optional_single_mut_pointer, - => @fieldParentPtr(Payload.PointerSimple, "base", self.ptr_otherwise), + => self.cast(Payload.ElemType), + else => null, }; } @@ -165,7 +187,7 @@ pub const Type = extern union { // Hot path for common case: if (a.castPointer()) |a_payload| { if (b.castPointer()) |b_payload| { - return a.tag() == b.tag() and eql(a_payload.pointee_type, b_payload.pointee_type); + return a.tag() == b.tag() and eql(a_payload.data, b_payload.data); } } const is_slice_a = isSlice(a); @@ -230,8 +252,8 @@ pub const Type = extern union { return true; }, .Optional => { - var buf_a: Payload.PointerSimple = undefined; - var buf_b: Payload.PointerSimple = undefined; + var buf_a: Payload.ElemType = undefined; + var buf_b: Payload.ElemType = undefined; return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b)); }, .Float, @@ -294,7 +316,7 @@ pub const Type = extern union { } }, .Optional => { - var buf: Payload.PointerSimple = undefined; + var buf: Payload.ElemType = undefined; std.hash.autoHash(&hasher, self.optionalChild(&buf).hash()); }, .Float, @@ -364,68 +386,64 @@ pub const Type = extern union { .@"anyframe", => unreachable, - .array_u8_sentinel_0 => return self.copyPayloadShallow(allocator, Payload.Array_u8_Sentinel0), - .array_u8 => return self.copyPayloadShallow(allocator, Payload.Array_u8), + .array_u8, + .array_u8_sentinel_0, + => return self.copyPayloadShallow(allocator, Payload.Len), + + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + 
.c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + .anyframe_T, + => return self.copyPayloadShallow(allocator, Payload.ElemType), + + .int_signed, + .int_unsigned, + => return self.copyPayloadShallow(allocator, Payload.Bits), + .array => { - const payload = @fieldParentPtr(Payload.Array, "base", self.ptr_otherwise); - const new_payload = try allocator.create(Payload.Array); - new_payload.* = .{ - .base = payload.base, + const payload = self.castTag(.array).?.data; + return Tag.array.create(allocator, .{ .len = payload.len, .elem_type = try payload.elem_type.copy(allocator), - }; - return Type{ .ptr_otherwise = &new_payload.base }; + }); }, .array_sentinel => { - const payload = @fieldParentPtr(Payload.ArraySentinel, "base", self.ptr_otherwise); - const new_payload = try allocator.create(Payload.ArraySentinel); - new_payload.* = .{ - .base = payload.base, + const payload = self.castTag(.array_sentinel).?.data; + return Tag.array_sentinel.create(allocator, .{ .len = payload.len, .sentinel = try payload.sentinel.copy(allocator), .elem_type = try payload.elem_type.copy(allocator), - }; - return Type{ .ptr_otherwise = &new_payload.base }; + }); }, - .int_signed => return self.copyPayloadShallow(allocator, Payload.IntSigned), - .int_unsigned => return self.copyPayloadShallow(allocator, Payload.IntUnsigned), .function => { - const payload = @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise); - const new_payload = try allocator.create(Payload.Function); + const payload = self.castTag(.function).?.data; const param_types = try allocator.alloc(Type, payload.param_types.len); for (payload.param_types) |param_type, i| { param_types[i] = try param_type.copy(allocator); } - new_payload.* = .{ - .base = payload.base, + return Tag.function.create(allocator, .{ .return_type = try payload.return_type.copy(allocator), .param_types = param_types, .cc = payload.cc, - }; - return Type{ .ptr_otherwise = &new_payload.base }; + }); }, - .optional => return self.copyPayloadSingleField(allocator, Payload.Optional, "child_type"), - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => return self.copyPayloadSingleField(allocator, Payload.PointerSimple, "pointee_type"), - .anyframe_T => return self.copyPayloadSingleField(allocator, Payload.AnyFrame, "return_type"), - .pointer => { - const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise); - const new_payload = try allocator.create(Payload.Pointer); - new_payload.* = .{ - .base = payload.base, - + const payload = self.castTag(.pointer).?.data; + const sent: ?Value = if (payload.sentinel) |some| + try some.copy(allocator) + else + null; + return Tag.pointer.create(allocator, .{ .pointee_type = try payload.pointee_type.copy(allocator), - .sentinel = if (payload.sentinel) |some| try some.copy(allocator) else null, + .sentinel = sent, .@"align" = payload.@"align", .bit_offset = payload.bit_offset, .host_size = payload.host_size, @@ -433,41 +451,28 @@ pub const Type = extern union { .mutable = payload.mutable, .@"volatile" = payload.@"volatile", .size = payload.size, - }; - return Type{ .ptr_otherwise = &new_payload.base }; + }); }, .error_union => { - const payload = @fieldParentPtr(Payload.ErrorUnion, "base", self.ptr_otherwise); - const new_payload = try 
allocator.create(Payload.ErrorUnion); - new_payload.* = .{ - .base = payload.base, - + const payload = self.castTag(.error_union).?.data; + return Tag.error_union.create(allocator, .{ .error_set = try payload.error_set.copy(allocator), .payload = try payload.payload.copy(allocator), - }; - return Type{ .ptr_otherwise = &new_payload.base }; + }); }, - .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), - .error_set_single => return self.copyPayloadShallow(allocator, Payload.ErrorSetSingle), - .empty_struct => return self.copyPayloadShallow(allocator, Payload.EmptyStruct), + .error_set => return self.copyPayloadShallow(allocator, Payload.Decl), + .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), + .empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope), } } fn copyPayloadShallow(self: Type, allocator: *Allocator, comptime T: type) error{OutOfMemory}!Type { - const payload = @fieldParentPtr(T, "base", self.ptr_otherwise); + const payload = self.cast(T).?; const new_payload = try allocator.create(T); new_payload.* = payload.*; return Type{ .ptr_otherwise = &new_payload.base }; } - fn copyPayloadSingleField(self: Type, allocator: *Allocator, comptime T: type, comptime field_name: []const u8) error{OutOfMemory}!Type { - const payload = @fieldParentPtr(T, "base", self.ptr_otherwise); - const new_payload = try allocator.create(T); - new_payload.base = payload.base; - @field(new_payload, field_name) = try @field(payload, field_name).copy(allocator); - return Type{ .ptr_otherwise = &new_payload.base }; - } - pub fn format( self: Type, comptime fmt: []const u8, @@ -527,7 +532,7 @@ pub const Type = extern union { .fn_ccc_void_no_args => return out_stream.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => return out_stream.writeAll("*const comptime_int"), .function => { - const payload = @fieldParentPtr(Payload.Function, "base", ty.ptr_otherwise); + const payload = ty.castTag(.function).?.data; try out_stream.writeAll("fn("); for (payload.param_types) |param_type, i| { if (i != 0) try out_stream.writeAll(", "); @@ -539,108 +544,108 @@ pub const Type = extern union { }, .anyframe_T => { - const payload = @fieldParentPtr(Payload.AnyFrame, "base", ty.ptr_otherwise); + const return_type = ty.castTag(.anyframe_T).?.data; try out_stream.print("anyframe->", .{}); - ty = payload.return_type; + ty = return_type; continue; }, .array_u8 => { - const payload = @fieldParentPtr(Payload.Array_u8, "base", ty.ptr_otherwise); - return out_stream.print("[{}]u8", .{payload.len}); + const len = ty.castTag(.array_u8).?.data; + return out_stream.print("[{}]u8", .{len}); }, .array_u8_sentinel_0 => { - const payload = @fieldParentPtr(Payload.Array_u8_Sentinel0, "base", ty.ptr_otherwise); - return out_stream.print("[{}:0]u8", .{payload.len}); + const len = ty.castTag(.array_u8_sentinel_0).?.data; + return out_stream.print("[{}:0]u8", .{len}); }, .array => { - const payload = @fieldParentPtr(Payload.Array, "base", ty.ptr_otherwise); + const payload = ty.castTag(.array).?.data; try out_stream.print("[{}]", .{payload.len}); ty = payload.elem_type; continue; }, .array_sentinel => { - const payload = @fieldParentPtr(Payload.ArraySentinel, "base", ty.ptr_otherwise); + const payload = ty.castTag(.array_sentinel).?.data; try out_stream.print("[{}:{}]", .{ payload.len, payload.sentinel }); ty = payload.elem_type; continue; }, .single_const_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); 
+ const pointee_type = ty.castTag(.single_const_pointer).?.data; try out_stream.writeAll("*const "); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .single_mut_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.single_mut_pointer).?.data; try out_stream.writeAll("*"); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .many_const_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.many_const_pointer).?.data; try out_stream.writeAll("[*]const "); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .many_mut_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.many_mut_pointer).?.data; try out_stream.writeAll("[*]"); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .c_const_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.c_const_pointer).?.data; try out_stream.writeAll("[*c]const "); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .c_mut_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.c_mut_pointer).?.data; try out_stream.writeAll("[*c]"); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .const_slice => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.const_slice).?.data; try out_stream.writeAll("[]const "); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .mut_slice => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.mut_slice).?.data; try out_stream.writeAll("[]"); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .int_signed => { - const payload = @fieldParentPtr(Payload.IntSigned, "base", ty.ptr_otherwise); - return out_stream.print("i{}", .{payload.bits}); + const bits = ty.castTag(.int_signed).?.data; + return out_stream.print("i{d}", .{bits}); }, .int_unsigned => { - const payload = @fieldParentPtr(Payload.IntUnsigned, "base", ty.ptr_otherwise); - return out_stream.print("u{}", .{payload.bits}); + const bits = ty.castTag(.int_unsigned).?.data; + return out_stream.print("u{d}", .{bits}); }, .optional => { - const payload = @fieldParentPtr(Payload.Optional, "base", ty.ptr_otherwise); + const child_type = ty.castTag(.optional).?.data; try out_stream.writeByte('?'); - ty = payload.child_type; + ty = child_type; continue; }, .optional_single_const_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; try out_stream.writeAll("?*const "); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .optional_single_mut_pointer => { - const payload = @fieldParentPtr(Payload.PointerSimple, "base", ty.ptr_otherwise); + const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; try out_stream.writeAll("?*"); - ty = payload.pointee_type; + ty = pointee_type; continue; }, .pointer => { - const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise); + const payload = ty.castTag(.pointer).?.data; if (payload.sentinel) |some| switch (payload.size) { .One, .C => unreachable, .Many => try 
out_stream.print("[*:{}]", .{some}), @@ -652,10 +657,10 @@ pub const Type = extern union { .Slice => try out_stream.writeAll("[]"), } if (payload.@"align" != 0) { - try out_stream.print("align({}", .{payload.@"align"}); + try out_stream.print("align({d}", .{payload.@"align"}); if (payload.bit_offset != 0) { - try out_stream.print(":{}:{}", .{ payload.bit_offset, payload.host_size }); + try out_stream.print(":{d}:{d}", .{ payload.bit_offset, payload.host_size }); } try out_stream.writeAll(") "); } @@ -667,19 +672,19 @@ pub const Type = extern union { continue; }, .error_union => { - const payload = @fieldParentPtr(Payload.ErrorUnion, "base", ty.ptr_otherwise); + const payload = ty.castTag(.error_union).?.data; try payload.error_set.format("", .{}, out_stream); try out_stream.writeAll("!"); ty = payload.payload; continue; }, .error_set => { - const payload = @fieldParentPtr(Payload.ErrorSet, "base", ty.ptr_otherwise); - return out_stream.writeAll(std.mem.spanZ(payload.decl.name)); + const decl = ty.castTag(.error_set).?.data; + return out_stream.writeAll(std.mem.spanZ(decl.name)); }, .error_set_single => { - const payload = @fieldParentPtr(Payload.ErrorSetSingle, "base", ty.ptr_otherwise); - return out_stream.print("error{{{}}}", .{payload.name}); + const name = ty.castTag(.error_set_single).?.data; + return out_stream.print("error{{{s}}}", .{name}); }, } unreachable; @@ -784,11 +789,10 @@ pub const Type = extern union { .array => self.elemType().hasCodeGenBits() and self.arrayLen() != 0, .array_u8 => self.arrayLen() != 0, .array_sentinel, .single_const_pointer, .single_mut_pointer, .many_const_pointer, .many_mut_pointer, .c_const_pointer, .c_mut_pointer, .const_slice, .mut_slice, .pointer => self.elemType().hasCodeGenBits(), - .int_signed => self.cast(Payload.IntSigned).?.bits != 0, - .int_unsigned => self.cast(Payload.IntUnsigned).?.bits != 0, + .int_signed, .int_unsigned => self.cast(Payload.Bits).?.data != 0, .error_union => { - const payload = self.cast(Payload.ErrorUnion).?; + const payload = self.castTag(.error_union).?.data; return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits(); }, @@ -855,7 +859,7 @@ pub const Type = extern union { => return @divExact(target.cpu.arch.ptrBitWidth(), 8), .pointer => { - const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise); + const payload = self.castTag(.pointer).?.data; if (payload.@"align" != 0) return payload.@"align"; return @divExact(target.cpu.arch.ptrBitWidth(), 8); @@ -885,18 +889,12 @@ pub const Type = extern union { .array, .array_sentinel => return self.elemType().abiAlignment(target), .int_signed, .int_unsigned => { - const bits: u16 = if (self.cast(Payload.IntSigned)) |pl| - pl.bits - else if (self.cast(Payload.IntUnsigned)) |pl| - pl.bits - else - unreachable; - + const bits: u16 = self.cast(Payload.Bits).?.data; return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8); }, .optional => { - var buf: Payload.PointerSimple = undefined; + var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); if (!child_type.hasCodeGenBits()) return 1; @@ -907,7 +905,7 @@ pub const Type = extern union { }, .error_union => { - const payload = self.cast(Payload.ErrorUnion).?; + const payload = self.castTag(.error_union).?.data; if (!payload.error_set.hasCodeGenBits()) { return payload.payload.abiAlignment(target); } else if (!payload.payload.hasCodeGenBits()) { @@ -955,16 +953,19 @@ pub const Type = extern union { .bool, => return 1, - .array_u8 => 
@fieldParentPtr(Payload.Array_u8_Sentinel0, "base", self.ptr_otherwise).len, - .array_u8_sentinel_0 => @fieldParentPtr(Payload.Array_u8_Sentinel0, "base", self.ptr_otherwise).len + 1, + .array_u8 => self.castTag(.array_u8).?.data, + .array_u8_sentinel_0 => self.castTag(.array_u8_sentinel_0).?.data + 1, .array => { - const payload = @fieldParentPtr(Payload.Array, "base", self.ptr_otherwise); + const payload = self.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target)); return payload.len * elem_size; }, .array_sentinel => { - const payload = @fieldParentPtr(Payload.ArraySentinel, "base", self.ptr_otherwise); - const elem_size = std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target)); + const payload = self.castTag(.array_sentinel).?.data; + const elem_size = std.math.max( + payload.elem_type.abiAlignment(target), + payload.elem_type.abiSize(target), + ); return (payload.len + 1) * elem_size; }, .i16, .u16 => return 2, @@ -1022,18 +1023,12 @@ pub const Type = extern union { => return 2, // TODO revisit this when we have the concept of the error tag type .int_signed, .int_unsigned => { - const bits: u16 = if (self.cast(Payload.IntSigned)) |pl| - pl.bits - else if (self.cast(Payload.IntUnsigned)) |pl| - pl.bits - else - unreachable; - + const bits: u16 = self.cast(Payload.Bits).?.data; return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8); }, .optional => { - var buf: Payload.PointerSimple = undefined; + var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); if (!child_type.hasCodeGenBits()) return 1; @@ -1048,7 +1043,7 @@ pub const Type = extern union { }, .error_union => { - const payload = self.cast(Payload.ErrorUnion).?; + const payload = self.castTag(.error_union).?.data; if (!payload.error_set.hasCodeGenBits() and !payload.payload.hasCodeGenBits()) { return 0; } else if (!payload.error_set.hasCodeGenBits()) { @@ -1132,7 +1127,7 @@ pub const Type = extern union { .single_const_pointer_to_comptime_int, => true, - .pointer => self.cast(Payload.Pointer).?.size == .One, + .pointer => self.castTag(.pointer).?.data.size == .One, }; } @@ -1214,7 +1209,7 @@ pub const Type = extern union { .single_const_pointer_to_comptime_int, => .One, - .pointer => self.cast(Payload.Pointer).?.size, + .pointer => self.castTag(.pointer).?.data.size, }; } @@ -1289,7 +1284,7 @@ pub const Type = extern union { .const_slice_u8, => true, - .pointer => self.cast(Payload.Pointer).?.size == .Slice, + .pointer => self.castTag(.pointer).?.data.size == .Slice, }; } @@ -1364,7 +1359,7 @@ pub const Type = extern union { .const_slice, => true, - .pointer => !self.cast(Payload.Pointer).?.mutable, + .pointer => !self.castTag(.pointer).?.data.mutable, }; } @@ -1438,7 +1433,7 @@ pub const Type = extern union { => false, .pointer => { - const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise); + const payload = self.castTag(.pointer).?.data; return payload.@"volatile"; }, }; @@ -1514,7 +1509,7 @@ pub const Type = extern union { => false, .pointer => { - const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise); + const payload = self.castTag(.pointer).?.data; return payload.@"allowzero"; }, }; @@ -1525,7 +1520,7 @@ pub const Type = extern union { switch (self.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer => return true, .optional => { - var buf: Payload.PointerSimple = undefined; + var buf: Payload.ElemType = undefined; const 
child_type = self.optionalChild(&buf); // optionals of zero sized pointers behave like bools if (!child_type.hasCodeGenBits()) return false; @@ -1563,7 +1558,7 @@ pub const Type = extern union { => return false, .Optional => { - var buf: Payload.PointerSimple = undefined; + var buf: Payload.ElemType = undefined; return ty.optionalChild(&buf).isValidVarType(is_extern); }, .Pointer, .Array => ty = ty.elemType(), @@ -1631,8 +1626,8 @@ pub const Type = extern union { .empty_struct, => unreachable, - .array => self.cast(Payload.Array).?.elem_type, - .array_sentinel => self.cast(Payload.ArraySentinel).?.elem_type, + .array => self.castTag(.array).?.data.elem_type, + .array_sentinel => self.castTag(.array_sentinel).?.data.elem_type, .single_const_pointer, .single_mut_pointer, .many_const_pointer, @@ -1641,28 +1636,29 @@ pub const Type = extern union { .c_mut_pointer, .const_slice, .mut_slice, - => self.castPointer().?.pointee_type, + => self.castPointer().?.data, .array_u8, .array_u8_sentinel_0, .const_slice_u8 => Type.initTag(.u8), .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), - .pointer => self.cast(Payload.Pointer).?.pointee_type, + .pointer => self.castTag(.pointer).?.data.pointee_type, }; } /// Asserts that the type is an optional. - pub fn optionalChild(self: Type, buf: *Payload.PointerSimple) Type { + /// Resulting `Type` will have inner memory referencing `buf`. + pub fn optionalChild(self: Type, buf: *Payload.ElemType) Type { return switch (self.tag()) { - .optional => self.cast(Payload.Optional).?.child_type, + .optional => self.castTag(.optional).?.data, .optional_single_mut_pointer => { buf.* = .{ .base = .{ .tag = .single_mut_pointer }, - .pointee_type = self.castPointer().?.pointee_type, + .data = self.castPointer().?.data, }; return Type.initPayload(&buf.base); }, .optional_single_const_pointer => { buf.* = .{ .base = .{ .tag = .single_const_pointer }, - .pointee_type = self.castPointer().?.pointee_type, + .data = self.castPointer().?.data, }; return Type.initPayload(&buf.base); }, @@ -1673,23 +1669,16 @@ pub const Type = extern union { /// Asserts that the type is an optional. /// Same as `optionalChild` but allocates the buffer if needed. pub fn optionalChildAlloc(self: Type, allocator: *Allocator) !Type { - return switch (self.tag()) { - .optional => self.cast(Payload.Optional).?.child_type, - .optional_single_mut_pointer, .optional_single_const_pointer => { - const payload = try allocator.create(Payload.PointerSimple); - payload.* = .{ - .base = .{ - .tag = if (self.tag() == .optional_single_const_pointer) - .single_const_pointer - else - .single_mut_pointer, - }, - .pointee_type = self.castPointer().?.pointee_type, - }; - return Type.initPayload(&payload.base); + switch (self.tag()) { + .optional => return self.castTag(.optional).?.data, + .optional_single_mut_pointer => { + return Tag.single_mut_pointer.create(allocator, self.castPointer().?.data); + }, + .optional_single_const_pointer => { + return Tag.single_const_pointer.create(allocator, self.castPointer().?.data); }, else => unreachable, - }; + } } /// Asserts the type is an array or vector. 
@@ -1759,10 +1748,10 @@ pub const Type = extern union { .empty_struct, => unreachable, - .array => self.cast(Payload.Array).?.len, - .array_sentinel => self.cast(Payload.ArraySentinel).?.len, - .array_u8 => self.cast(Payload.Array_u8).?.len, - .array_u8_sentinel_0 => self.cast(Payload.Array_u8_Sentinel0).?.len, + .array => self.castTag(.array).?.data.len, + .array_sentinel => self.castTag(.array_sentinel).?.data.len, + .array_u8 => self.castTag(.array_u8).?.data, + .array_u8_sentinel_0 => self.castTag(.array_u8_sentinel_0).?.data, }; } @@ -1836,8 +1825,8 @@ pub const Type = extern union { .array_u8, => return null, - .pointer => return self.cast(Payload.Pointer).?.sentinel, - .array_sentinel => return self.cast(Payload.ArraySentinel).?.sentinel, + .pointer => return self.castTag(.pointer).?.data.sentinel, + .array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel, .array_u8_sentinel_0 => return Value.initTag(.zero), }; } @@ -2048,8 +2037,14 @@ pub const Type = extern union { .empty_struct, => unreachable, - .int_unsigned => .{ .signedness = .unsigned, .bits = self.cast(Payload.IntUnsigned).?.bits }, - .int_signed => .{ .signedness = .signed, .bits = self.cast(Payload.IntSigned).?.bits }, + .int_unsigned => .{ + .signedness = .unsigned, + .bits = self.castTag(.int_unsigned).?.data, + }, + .int_signed => .{ + .signedness = .signed, + .bits = self.castTag(.int_signed).?.data, + }, .u8 => .{ .signedness = .unsigned, .bits = 8 }, .i8 => .{ .signedness = .signed, .bits = 8 }, .u16 => .{ .signedness = .unsigned, .bits = 16 }, @@ -2178,7 +2173,7 @@ pub const Type = extern union { .fn_void_no_args => 0, .fn_naked_noreturn_no_args => 0, .fn_ccc_void_no_args => 0, - .function => @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise).param_types.len, + .function => self.castTag(.function).?.data.param_types.len, .f16, .f32, @@ -2254,7 +2249,7 @@ pub const Type = extern union { .fn_naked_noreturn_no_args => return, .fn_ccc_void_no_args => return, .function => { - const payload = @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise); + const payload = self.castTag(.function).?.data; std.mem.copy(Type, types, payload.param_types); }, @@ -2327,7 +2322,7 @@ pub const Type = extern union { pub fn fnParamType(self: Type, index: usize) Type { switch (self.tag()) { .function => { - const payload = @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise); + const payload = self.castTag(.function).?.data; return payload.param_types[index]; }, @@ -2410,7 +2405,7 @@ pub const Type = extern union { .fn_ccc_void_no_args, => Type.initTag(.void), - .function => @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise).return_type, + .function => self.castTag(.function).?.data.return_type, .f16, .f32, @@ -2484,7 +2479,7 @@ pub const Type = extern union { .fn_void_no_args => .Unspecified, .fn_naked_noreturn_no_args => .Naked, .fn_ccc_void_no_args => .C, - .function => @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise).cc, + .function => self.castTag(.function).?.data.cc, .f16, .f32, @@ -2760,15 +2755,8 @@ pub const Type = extern union { .@"null" => return Value.initTag(.null_value), .@"undefined" => return Value.initTag(.undef), - .int_unsigned => { - if (ty.cast(Payload.IntUnsigned).?.bits == 0) { - return Value.initTag(.zero); - } else { - return null; - } - }, - .int_signed => { - if (ty.cast(Payload.IntSigned).?.bits == 0) { + .int_unsigned, .int_signed => { + if (ty.cast(Payload.Bits).?.data == 0) { return Value.initTag(.zero); } else { return null; @@ -2787,12 
+2775,11 @@ pub const Type = extern union { .single_const_pointer, .single_mut_pointer, => { - const ptr = ty.castPointer().?; - ty = ptr.pointee_type; + ty = ty.castPointer().?.data; continue; }, .pointer => { - ty = ty.cast(Payload.Pointer).?.pointee_type; + ty = ty.castTag(.pointer).?.data.pointee_type; continue; }, }; @@ -2869,7 +2856,7 @@ pub const Type = extern union { .c_mut_pointer, => return true, - .pointer => self.cast(Payload.Pointer).?.size == .C, + .pointer => self.castTag(.pointer).?.data.size == .C, }; } @@ -2950,7 +2937,7 @@ pub const Type = extern union { .pointer, => unreachable, - .empty_struct => self.cast(Type.Payload.EmptyStruct).?.scope, + .empty_struct => self.castTag(.empty_struct).?.data, }; } @@ -3105,117 +3092,195 @@ pub const Type = extern union { pub const last_no_payload_tag = Tag.const_slice_u8; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; - }; - pub const Payload = struct { - tag: Tag, + pub fn Type(comptime t: Tag) type { + return switch (t) { + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + .enum_literal, + .@"null", + .@"undefined", + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .single_const_pointer_to_comptime_int, + .anyerror_void_error_union, + .@"anyframe", + .const_slice_u8, + => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), + + .array_u8, + .array_u8_sentinel_0, + => Payload.Len, + + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + .anyframe_T, + => Payload.ElemType, + + .int_signed, + .int_unsigned, + => Payload.Bits, + + .array => Payload.Array, + .array_sentinel => Payload.ArraySentinel, + .pointer => Payload.Pointer, + .function => Payload.Function, + .error_union => Payload.ErrorUnion, + .error_set => Payload.Decl, + .error_set_single => Payload.Name, + .empty_struct => Payload.ContainerScope, + }; + } - pub const Array_u8_Sentinel0 = struct { - base: Payload = Payload{ .tag = .array_u8_sentinel_0 }, + pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Type { + const ptr = try ally.create(t.Type()); + ptr.* = .{ + .base = .{ .tag = t }, + .data = data, + }; + return Type{ .ptr_otherwise = &ptr.base }; + } - len: u64, - }; + pub fn Data(comptime t: Tag) type { + return std.meta.fieldInfo(t.Type(), "data").field_type; + } + }; - pub const Array_u8 = struct { - base: Payload = Payload{ .tag = .array_u8 }, + /// The sub-types are named after what fields they contain. 
+ pub const Payload = struct { + tag: Tag, - len: u64, + pub const Len = struct { + base: Payload, + data: u64, }; pub const Array = struct { - base: Payload = Payload{ .tag = .array }, + pub const base_tag = Tag.array; - len: u64, - elem_type: Type, + base: Payload = Payload{ .tag = base_tag }, + data: struct { + len: u64, + elem_type: Type, + }, }; pub const ArraySentinel = struct { - base: Payload = Payload{ .tag = .array_sentinel }, + pub const base_tag = Tag.array_sentinel; - len: u64, - sentinel: Value, - elem_type: Type, + base: Payload = Payload{ .tag = base_tag }, + data: struct { + len: u64, + sentinel: Value, + elem_type: Type, + }, }; - pub const PointerSimple = struct { + pub const ElemType = struct { base: Payload, - - pointee_type: Type, + data: Type, }; - pub const IntSigned = struct { - base: Payload = Payload{ .tag = .int_signed }, - - bits: u16, - }; - - pub const IntUnsigned = struct { - base: Payload = Payload{ .tag = .int_unsigned }, - - bits: u16, + pub const Bits = struct { + base: Payload, + data: u16, }; pub const Function = struct { - base: Payload = Payload{ .tag = .function }, - - param_types: []Type, - return_type: Type, - cc: std.builtin.CallingConvention, - }; - - pub const Optional = struct { - base: Payload = Payload{ .tag = .optional }, + pub const base_tag = Tag.function; - child_type: Type, + base: Payload = Payload{ .tag = base_tag }, + data: struct { + param_types: []Type, + return_type: Type, + cc: std.builtin.CallingConvention, + }, }; pub const Pointer = struct { - base: Payload = .{ .tag = .pointer }, - - pointee_type: Type, - sentinel: ?Value, - /// If zero use pointee_type.AbiAlign() - @"align": u32, - bit_offset: u16, - host_size: u16, - @"allowzero": bool, - mutable: bool, - @"volatile": bool, - size: std.builtin.TypeInfo.Pointer.Size, + pub const base_tag = Tag.pointer; + + base: Payload = Payload{ .tag = base_tag }, + data: struct { + pointee_type: Type, + sentinel: ?Value, + /// If zero use pointee_type.AbiAlign() + @"align": u32, + bit_offset: u16, + host_size: u16, + @"allowzero": bool, + mutable: bool, + @"volatile": bool, + size: std.builtin.TypeInfo.Pointer.Size, + }, }; pub const ErrorUnion = struct { - base: Payload = .{ .tag = .error_union }, + pub const base_tag = Tag.error_union; - error_set: Type, - payload: Type, - }; - - pub const AnyFrame = struct { - base: Payload = .{ .tag = .anyframe_T }, - - return_type: Type, + base: Payload = Payload{ .tag = base_tag }, + data: struct { + error_set: Type, + payload: Type, + }, }; - pub const ErrorSet = struct { - base: Payload = .{ .tag = .error_set }, - - decl: *Module.Decl, + pub const Decl = struct { + base: Payload, + data: *Module.Decl, }; - pub const ErrorSetSingle = struct { - base: Payload = .{ .tag = .error_set_single }, - + pub const Name = struct { + base: Payload, /// memory is owned by `Module` - name: []const u8, + data: []const u8, }; /// Mostly used for namespace like structs with zero fields. /// Most commonly used for files. 
- pub const EmptyStruct = struct { - base: Payload = .{ .tag = .empty_struct }, - - scope: *Module.Scope.Container, + pub const ContainerScope = struct { + base: Payload, + data: *Module.Scope.Container, }; }; }; diff --git a/src/value.zig b/src/value.zig index 53c4b2d540..f26c8d8772 100644 --- a/src/value.zig +++ b/src/value.zig @@ -440,21 +440,18 @@ pub const Value = extern union { .int_type => { const payload = self.cast(Payload.IntType).?; - if (payload.signed) { - const new = try allocator.create(Type.Payload.IntSigned); - new.* = .{ .bits = payload.bits }; - return Type.initPayload(&new.base); - } else { - const new = try allocator.create(Type.Payload.IntUnsigned); - new.* = .{ .bits = payload.bits }; - return Type.initPayload(&new.base); - } + const new = try allocator.create(Type.Payload.Bits); + new.* = .{ + .base = .{ + .tag = if (payload.signed) .int_signed else .int_unsigned, + }, + .data = payload.bits, + }; + return Type.initPayload(&new.base); }, .error_set => { const payload = self.cast(Payload.ErrorSet).?; - const new = try allocator.create(Type.Payload.ErrorSet); - new.* = .{ .decl = payload.decl }; - return Type.initPayload(&new.base); + return Type.Tag.error_set.create(allocator, payload.decl); }, .undef, @@ -1321,13 +1318,13 @@ pub const Value = extern union { }, .int_type => { const payload = self.cast(Payload.IntType).?; - if (payload.signed) { - var new = Type.Payload.IntSigned{ .bits = payload.bits }; - return Type.initPayload(&new.base).hash(); - } else { - var new = Type.Payload.IntUnsigned{ .bits = payload.bits }; - return Type.initPayload(&new.base).hash(); - } + var int_payload = Type.Payload.Bits{ + .base = .{ + .tag = if (payload.signed) .int_signed else .int_unsigned, + }, + .data = payload.bits, + }; + return Type.initPayload(&int_payload.base).hash(); }, .empty_struct_value, diff --git a/src/zir.zig b/src/zir.zig index 47e1abc24b..bd9ab2c538 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -2785,7 +2785,7 @@ const EmitZIR = struct { } }, .Optional => { - var buf: Type.Payload.PointerSimple = undefined; + var buf: Type.Payload.ElemType = undefined; const inst = try self.arena.allocator.create(Inst.UnOp); inst.* = .{ .base = .{ diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 3a9f511373..8960f3ba4d 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -480,14 +480,11 @@ fn analyzeInstStr(mod: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerErr errdefer new_decl_arena.deinit(); const arena_bytes = try new_decl_arena.allocator.dupe(u8, str_inst.positionals.bytes); - const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0); - ty_payload.* = .{ .len = arena_bytes.len }; - const bytes_payload = try scope.arena().create(Value.Payload.Bytes); bytes_payload.* = .{ .data = arena_bytes }; const new_decl = try mod.createAnonymousDecl(scope, &new_decl_arena, .{ - .ty = Type.initPayload(&ty_payload.base), + .ty = try Type.Tag.array_u8_sentinel_0.create(scope.arena(), arena_bytes.len), .val = Value.initPayload(&bytes_payload.base), }); return mod.analyzeDeclRef(scope, str_inst.base.src, new_decl); @@ -952,13 +949,12 @@ fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) Inne param_types[i] = resolved; } - const payload = try arena.create(Type.Payload.Function); - payload.* = .{ + const fn_ty = try Type.Tag.function.create(arena, .{ .cc = fntype.kw_args.cc, .return_type = return_type, .param_types = param_types, - }; - return mod.constType(scope, fntype.base.src, Type.initPayload(&payload.base)); + }); + return 
mod.constType(scope, fntype.base.src, fn_ty); } fn analyzeInstPrimitive(mod: *Module, scope: *Scope, primitive: *zir.Inst.Primitive) InnerError!*Inst { @@ -1062,11 +1058,10 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr const ref_payload = try scope.arena().create(Value.Payload.RefVal); ref_payload.* = .{ .val = Value.initPayload(&error_payload.base) }; - const result_type = if (child_type.tag() == .anyerror) blk: { - const result_payload = try scope.arena().create(Type.Payload.ErrorSetSingle); - result_payload.* = .{ .name = entry.key }; - break :blk Type.initPayload(&result_payload.base); - } else child_type; + const result_type = if (child_type.tag() == .anyerror) + try Type.Tag.error_set_single.create(scope.arena(), entry.key) + else + child_type; return mod.constInst(scope, fieldptr.base.src, .{ .ty = try mod.simplePtrType(scope, fieldptr.base.src, result_type, false, .One), @@ -1195,15 +1190,10 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne // @intCast here because it would have been impossible to construct a value that // required a larger index. const elem_ptr = try array_ptr_val.elemPtr(scope.arena(), @intCast(usize, index_u64)); - - const type_payload = try scope.arena().create(Type.Payload.PointerSimple); - type_payload.* = .{ - .base = .{ .tag = .single_const_pointer }, - .pointee_type = elem_ty.elemType().elemType(), - }; + const pointee_type = elem_ty.elemType().elemType(); return mod.constInst(scope, inst.base.src, .{ - .ty = Type.initPayload(&type_payload.base), + .ty = try Type.Tag.single_const_pointer.create(scope.arena(), pointee_type), .val = elem_ptr, }); } -- cgit v1.2.3 From 3f7d9b5fc19e4081236b3b63aebbc80e1b17f5b5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 30 Dec 2020 22:31:56 -0700 Subject: stage2: rework Value Payload layout This is the same as the previous commit but for Value instead of Type. Add `Value.castTag` and note that it is preferable to call than `Value.cast`. This matches other abstractions in the codebase. Added a convenience function `Value.Tag.create` which really cleans up the callsites of creating `Value` objects. `Value` tags can now share payload types. This is in preparation for another improvement that I want to do. 
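For reference, here is a minimal standalone sketch of the layout that this commit and the previous `Type` commit both converge on. This is an illustration only, not part of the diff: the tag set is hypothetical and everything is trimmed down to a single shared payload type (`Payload.Bits`, the type that replaced `IntSigned`/`IntUnsigned`), so this `castTag` returns `?*Payload.Bits` directly rather than the comptime `?*t.Type()` used in src/type.zig; builtins follow 2020-era Zig.

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    pub const Type = extern union {
        /// Tag values below `Tag.no_payload_count` are stored inline;
        /// no heap allocation is involved for them.
        tag_if_small_enough: usize,
        ptr_otherwise: *Payload,

        pub const Payload = struct {
            tag: Tag,

            /// One payload type shared by every tag that only carries
            /// a bit count.
            pub const Bits = struct {
                base: Payload,
                data: u16,
            };
        };

        pub const Tag = enum(usize) {
            u8, // no payload
            int_signed, // payload: Payload.Bits
            int_unsigned, // payload: Payload.Bits

            pub const no_payload_count = 1;

            /// Allocates the payload and wires up its `base.tag`, so
            /// callsites shrink to a single expression.
            pub fn create(comptime t: Tag, ally: *Allocator, data: u16) !Type {
                const ptr = try ally.create(Payload.Bits);
                ptr.* = .{ .base = .{ .tag = t }, .data = data };
                return Type{ .ptr_otherwise = &ptr.base };
            }
        };

        /// Preferable to `cast`: checks exactly one tag and hands back
        /// the concrete payload, or null on a mismatch.
        pub fn castTag(self: Type, comptime t: Tag) ?*Payload.Bits {
            if (self.tag_if_small_enough < Tag.no_payload_count)
                return null;
            if (self.ptr_otherwise.tag == t)
                return @fieldParentPtr(Payload.Bits, "base", self.ptr_otherwise);
            return null;
        }
    };

With this in place a callsite reads, e.g. (`gpa` being any `*Allocator`):

    const ty = try Type.Tag.int_unsigned.create(gpa, 7); // a hypothetical "u7"
    const bits = ty.castTag(.int_unsigned).?.data;
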
--- src/Compilation.zig | 11 +- src/Module.zig | 193 +++++++---------- src/astgen.zig | 26 +-- src/codegen.zig | 52 ++--- src/codegen/c.zig | 36 ++-- src/codegen/wasm.zig | 6 +- src/link/Elf.zig | 2 +- src/llvm_backend.zig | 8 +- src/type.zig | 43 +--- src/value.zig | 599 +++++++++++++++++++++++++++++++-------------------- src/zir.zig | 34 +-- src/zir_sema.zig | 78 +++---- 12 files changed, 573 insertions(+), 515 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 11c8303fac..cd3db84ec2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1457,11 +1457,12 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .complete, .codegen_failure_retryable => { const module = self.bin_file.options.module.?; - if (decl.typed_value.most_recent.typed_value.val.cast(Value.Payload.Function)) |payload| { - switch (payload.func.analysis) { - .queued => module.analyzeFnBody(decl, payload.func) catch |err| switch (err) { + if (decl.typed_value.most_recent.typed_value.val.castTag(.function)) |payload| { + const func = payload.data; + switch (func.analysis) { + .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { - assert(payload.func.analysis != .in_progress); + assert(func.analysis != .in_progress); continue; }, error.OutOfMemory => return error.OutOfMemory, @@ -1475,7 +1476,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor var decl_arena = decl.typed_value.most_recent.arena.?.promote(module.gpa); defer decl.typed_value.most_recent.arena.?.* = decl_arena.state; log.debug("analyze liveness of {}\n", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, payload.func.analysis.success); + try liveness.analyze(module.gpa, &decl_arena.allocator, func.analysis.success); } assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits()); diff --git a/src/Module.zig b/src/Module.zig index 3e937fe49b..ca0718c3d5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1092,16 +1092,12 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { tvm.deinit(self.gpa); } - const value_payload = try decl_arena.allocator.create(Value.Payload.ExternFn); - value_payload.* = .{ .decl = decl }; + const fn_val = try Value.Tag.extern_fn.create(&decl_arena.allocator, decl); decl_arena_state.* = decl_arena.state; decl.typed_value = .{ .most_recent = .{ - .typed_value = .{ - .ty = fn_type, - .val = Value.initPayload(&value_payload.base), - }, + .typed_value = .{ .ty = fn_type, .val = fn_val }, .arena = decl_arena_state, }, }; @@ -1187,7 +1183,10 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .analysis = .{ .queued = fn_zir }, .owner_decl = decl, }; - fn_payload.* = .{ .func = new_func }; + fn_payload.* = .{ + .base = .{ .tag = .function }, + .data = new_func, + }; var prev_type_has_bits = false; var type_changed = true; @@ -1375,7 +1374,6 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { } const new_variable = try decl_arena.allocator.create(Var); - const var_payload = try decl_arena.allocator.create(Value.Payload.Variable); new_variable.* = .{ .owner_decl = decl, .init = var_info.val orelse undefined, @@ -1383,14 +1381,14 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .is_mutable = is_mutable, .is_threadlocal = is_threadlocal, }; - var_payload.* = .{ .variable = new_variable }; + const var_val = try Value.Tag.variable.create(&decl_arena.allocator, new_variable); decl_arena_state.* = 
decl_arena.state; decl.typed_value = .{ .most_recent = .{ .typed_value = .{ .ty = var_info.ty, - .val = Value.initPayload(&var_payload.base), + .val = var_val, }, .arena = decl_arena_state, }, @@ -2232,52 +2230,43 @@ pub fn constBool(self: *Module, scope: *Scope, src: usize, v: bool) !*Inst { } pub fn constIntUnsigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: u64) !*Inst { - const int_payload = try scope.arena().create(Value.Payload.Int_u64); - int_payload.* = .{ .int = int }; - return self.constInst(scope, src, .{ .ty = ty, - .val = Value.initPayload(&int_payload.base), + .val = try Value.Tag.int_u64.create(scope.arena(), int), }); } pub fn constIntSigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: i64) !*Inst { - const int_payload = try scope.arena().create(Value.Payload.Int_i64); - int_payload.* = .{ .int = int }; - return self.constInst(scope, src, .{ .ty = ty, - .val = Value.initPayload(&int_payload.base), + .val = try Value.Tag.int_i64.create(scope.arena(), int), }); } pub fn constIntBig(self: *Module, scope: *Scope, src: usize, ty: Type, big_int: BigIntConst) !*Inst { - const val_payload = if (big_int.positive) blk: { + if (big_int.positive) { if (big_int.to(u64)) |x| { return self.constIntUnsigned(scope, src, ty, x); } else |err| switch (err) { error.NegativeIntoUnsigned => unreachable, error.TargetTooSmall => {}, // handled below } - const big_int_payload = try scope.arena().create(Value.Payload.IntBigPositive); - big_int_payload.* = .{ .limbs = big_int.limbs }; - break :blk &big_int_payload.base; - } else blk: { + return self.constInst(scope, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_positive.create(scope.arena(), big_int.limbs), + }); + } else { if (big_int.to(i64)) |x| { return self.constIntSigned(scope, src, ty, x); } else |err| switch (err) { error.NegativeIntoUnsigned => unreachable, error.TargetTooSmall => {}, // handled below } - const big_int_payload = try scope.arena().create(Value.Payload.IntBigNegative); - big_int_payload.* = .{ .limbs = big_int.limbs }; - break :blk &big_int_payload.base; - }; - - return self.constInst(scope, src, .{ - .ty = ty, - .val = Value.initPayload(val_payload), - }); + return self.constInst(scope, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_negative.create(scope.arena(), big_int.limbs), + }); + } } pub fn createAnonymousDecl( @@ -2346,26 +2335,20 @@ pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) Inn if (decl_tv.val.tag() == .variable) { return self.analyzeVarRef(scope, src, decl_tv); } - const ty = try self.simplePtrType(scope, src, decl_tv.ty, false, .One); - const val_payload = try scope.arena().create(Value.Payload.DeclRef); - val_payload.* = .{ .decl = decl }; - return self.constInst(scope, src, .{ - .ty = ty, - .val = Value.initPayload(&val_payload.base), + .ty = try self.simplePtrType(scope, src, decl_tv.ty, false, .One), + .val = try Value.Tag.decl_ref.create(scope.arena(), decl), }); } fn analyzeVarRef(self: *Module, scope: *Scope, src: usize, tv: TypedValue) InnerError!*Inst { - const variable = tv.val.cast(Value.Payload.Variable).?.variable; + const variable = tv.val.castTag(.variable).?.data; const ty = try self.simplePtrType(scope, src, tv.ty, variable.is_mutable, .One); if (!variable.is_mutable and !variable.is_extern) { - const val_payload = try scope.arena().create(Value.Payload.RefVal); - val_payload.* = .{ .val = variable.init }; return self.constInst(scope, src, .{ .ty = ty, - .val = Value.initPayload(&val_payload.base), + .val = try 
Value.Tag.ref_val.create(scope.arena(), variable.init), }); } @@ -3107,17 +3090,11 @@ pub fn intAdd(allocator: *Allocator, lhs: Value, rhs: Value) !Value { result_bigint.add(lhs_bigint, rhs_bigint); const result_limbs = result_bigint.limbs[0..result_bigint.len]; - const val_payload = if (result_bigint.positive) blk: { - const val_payload = try allocator.create(Value.Payload.IntBigPositive); - val_payload.* = .{ .limbs = result_limbs }; - break :blk &val_payload.base; - } else blk: { - const val_payload = try allocator.create(Value.Payload.IntBigNegative); - val_payload.* = .{ .limbs = result_limbs }; - break :blk &val_payload.base; - }; - - return Value.initPayload(val_payload); + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(allocator, result_limbs); + } else { + return Value.Tag.int_big_negative.create(allocator, result_limbs); + } } pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value { @@ -3135,85 +3112,81 @@ pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value { result_bigint.sub(lhs_bigint, rhs_bigint); const result_limbs = result_bigint.limbs[0..result_bigint.len]; - const val_payload = if (result_bigint.positive) blk: { - const val_payload = try allocator.create(Value.Payload.IntBigPositive); - val_payload.* = .{ .limbs = result_limbs }; - break :blk &val_payload.base; - } else blk: { - const val_payload = try allocator.create(Value.Payload.IntBigNegative); - val_payload.* = .{ .limbs = result_limbs }; - break :blk &val_payload.base; - }; - - return Value.initPayload(val_payload); + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(allocator, result_limbs); + } else { + return Value.Tag.int_big_negative.create(allocator, result_limbs); + } } -pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value { - var bit_count = switch (float_type.tag()) { - .comptime_float => 128, - else => float_type.floatBits(self.getTarget()), - }; - - const allocator = scope.arena(); - const val_payload = switch (bit_count) { - 16 => { - return self.fail(scope, src, "TODO Implement addition for soft floats", .{}); +pub fn floatAdd( + self: *Module, + scope: *Scope, + float_type: Type, + src: usize, + lhs: Value, + rhs: Value, +) !Value { + const arena = scope.arena(); + switch (float_type.tag()) { + .f16 => { + @panic("TODO add __trunctfhf2 to compiler-rt"); + //const lhs_val = lhs.toFloat(f16); + //const rhs_val = rhs.toFloat(f16); + //return Value.Tag.float_16.create(arena, lhs_val + rhs_val); }, - 32 => blk: { + .f32 => { const lhs_val = lhs.toFloat(f32); const rhs_val = rhs.toFloat(f32); - const val_payload = try allocator.create(Value.Payload.Float_32); - val_payload.* = .{ .val = lhs_val + rhs_val }; - break :blk &val_payload.base; + return Value.Tag.float_32.create(arena, lhs_val + rhs_val); }, - 64 => blk: { + .f64 => { const lhs_val = lhs.toFloat(f64); const rhs_val = rhs.toFloat(f64); - const val_payload = try allocator.create(Value.Payload.Float_64); - val_payload.* = .{ .val = lhs_val + rhs_val }; - break :blk &val_payload.base; + return Value.Tag.float_64.create(arena, lhs_val + rhs_val); }, - 128 => { - return self.fail(scope, src, "TODO Implement addition for big floats", .{}); + .f128, .comptime_float, .c_longdouble => { + const lhs_val = lhs.toFloat(f128); + const rhs_val = rhs.toFloat(f128); + return Value.Tag.float_128.create(arena, lhs_val + rhs_val); }, else => unreachable, - }; - - return Value.initPayload(val_payload); + } } -pub fn floatSub(self: 
*Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value { - var bit_count = switch (float_type.tag()) { - .comptime_float => 128, - else => float_type.floatBits(self.getTarget()), - }; - - const allocator = scope.arena(); - const val_payload = switch (bit_count) { - 16 => { - return self.fail(scope, src, "TODO Implement substraction for soft floats", .{}); +pub fn floatSub( + self: *Module, + scope: *Scope, + float_type: Type, + src: usize, + lhs: Value, + rhs: Value, +) !Value { + const arena = scope.arena(); + switch (float_type.tag()) { + .f16 => { + @panic("TODO add __trunctfhf2 to compiler-rt"); + //const lhs_val = lhs.toFloat(f16); + //const rhs_val = rhs.toFloat(f16); + //return Value.Tag.float_16.create(arena, lhs_val - rhs_val); }, - 32 => blk: { + .f32 => { const lhs_val = lhs.toFloat(f32); const rhs_val = rhs.toFloat(f32); - const val_payload = try allocator.create(Value.Payload.Float_32); - val_payload.* = .{ .val = lhs_val - rhs_val }; - break :blk &val_payload.base; + return Value.Tag.float_32.create(arena, lhs_val - rhs_val); }, - 64 => blk: { + .f64 => { const lhs_val = lhs.toFloat(f64); const rhs_val = rhs.toFloat(f64); - const val_payload = try allocator.create(Value.Payload.Float_64); - val_payload.* = .{ .val = lhs_val - rhs_val }; - break :blk &val_payload.base; + return Value.Tag.float_64.create(arena, lhs_val - rhs_val); }, - 128 => { - return self.fail(scope, src, "TODO Implement substraction for big floats", .{}); + .f128, .comptime_float, .c_longdouble => { + const lhs_val = lhs.toFloat(f128); + const rhs_val = rhs.toFloat(f128); + return Value.Tag.float_128.create(arena, lhs_val - rhs_val); }, else => unreachable, - }; - - return Value.initPayload(val_payload); + } } pub fn simplePtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) Allocator.Error!Type { diff --git a/src/astgen.zig b/src/astgen.zig index 1fc8a0d19e..c5261c4073 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -1956,13 +1956,13 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo 32 => if (is_signed) Value.initTag(.i32_type) else Value.initTag(.u32_type), 64 => if (is_signed) Value.initTag(.i64_type) else Value.initTag(.u64_type), else => { - const int_type_payload = try scope.arena().create(Value.Payload.IntType); - int_type_payload.* = .{ .signed = is_signed, .bits = bit_count }; - const result = try addZIRInstConst(mod, scope, src, .{ + return rlWrap(mod, scope, rl, try addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.type), - .val = Value.initPayload(&int_type_payload.base), - }); - return rlWrap(mod, scope, rl, result); + .val = try Value.Tag.int_type.create(scope.arena(), .{ + .signed = is_signed, + .bits = bit_count, + }), + })); }, }; const result = try addZIRInstConst(mod, scope, src, .{ @@ -2062,11 +2062,9 @@ fn charLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) !*zir.Inst }, }; - const int_payload = try scope.arena().create(Value.Payload.Int_u64); - int_payload.* = .{ .int = value }; return addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.comptime_int), - .val = Value.initPayload(&int_payload.base), + .val = try Value.Tag.int_u64.create(scope.arena(), value), }); } @@ -2089,12 +2087,10 @@ fn integerLiteral(mod: *Module, scope: *Scope, int_lit: *ast.Node.OneToken) Inne prefixed_bytes[2..]; if (std.fmt.parseInt(u64, bytes, base)) |small_int| { - const int_payload = try arena.create(Value.Payload.Int_u64); - int_payload.* = .{ .int 
= small_int }; const src = tree.token_locs[int_lit.token].start; return addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.comptime_int), - .val = Value.initPayload(&int_payload.base), + .val = try Value.Tag.int_u64.create(arena, small_int), }); } else |err| { return mod.failTok(scope, int_lit.token, "TODO implement int literals that don't fit in a u64", .{}); @@ -2109,15 +2105,13 @@ fn floatLiteral(mod: *Module, scope: *Scope, float_lit: *ast.Node.OneToken) Inne return mod.failTok(scope, float_lit.token, "TODO hex floats", .{}); } - const val = std.fmt.parseFloat(f128, bytes) catch |e| switch (e) { + const float_number = std.fmt.parseFloat(f128, bytes) catch |e| switch (e) { error.InvalidCharacter => unreachable, // validated by tokenizer }; - const float_payload = try arena.create(Value.Payload.Float_128); - float_payload.* = .{ .val = val }; const src = tree.token_locs[float_lit.token].start; return addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.comptime_float), - .val = Value.initPayload(&float_payload.base), + .val = try Value.Tag.float_128.create(arena, float_number), }); } diff --git a/src/codegen.zig b/src/codegen.zig index d98a87a440..f978115ebc 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -137,7 +137,7 @@ pub fn generateSymbol( }, .Array => { // TODO populate .debug_info for the array - if (typed_value.val.cast(Value.Payload.Bytes)) |payload| { + if (typed_value.val.castTag(.bytes)) |payload| { if (typed_value.ty.sentinel()) |sentinel| { try code.ensureCapacity(code.items.len + payload.data.len + 1); code.appendSliceAssumeCapacity(payload.data); @@ -168,8 +168,8 @@ pub fn generateSymbol( }, .Pointer => { // TODO populate .debug_info for the pointer - if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| { - const decl = payload.decl; + if (typed_value.val.castTag(.decl_ref)) |payload| { + const decl = payload.data; if (decl.analysis != .complete) return error.AnalysisFail; // TODO handle the dependency of this symbol on the decl's vaddr. // If the decl changes vaddr, then this symbol needs to get regenerated. 
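Taken together, the call-site hunks above follow one mechanical rule: `val.cast(Value.Payload.T).?.field` becomes `val.castTag(.tag).?.data`, because every payload now shares the same `base`-plus-`data` layout. The standalone sketch below is illustrative only, not code from this patch; the toy `Payload` and `Value` types stand in for the real ones in src/value.zig, and it is meant to build as an ordinary `zig test` file of this era:

const std = @import("std");

// Toy version of the reworked scheme: each payload embeds a shared
// `Payload` base carrying the tag, next to a uniformly named `data`
// field, so one comptime function maps tags to payload types.
const Payload = struct {
    tag: Tag,

    const Tag = enum { int_u64, bytes };

    const U64 = struct { base: Payload, data: u64 };
    const Bytes = struct { base: Payload, data: []const u8 };

    fn Type(comptime t: Tag) type {
        return switch (t) {
            .int_u64 => U64,
            .bytes => Bytes,
        };
    }
};

const Value = struct {
    ptr_otherwise: *Payload,

    fn castTag(self: Value, comptime t: Payload.Tag) ?*Payload.Type(t) {
        // A tag check plus @fieldParentPtr replaces the old
        // per-payload-type cast: the tag alone identifies the type.
        if (self.ptr_otherwise.tag == t)
            return @fieldParentPtr(Payload.Type(t), "base", self.ptr_otherwise);
        return null;
    }
};

test "castTag recovers the concrete payload from the shared base" {
    var payload = Payload.U64{ .base = .{ .tag = .int_u64 }, .data = 42 };
    const val = Value{ .ptr_otherwise = &payload.base };
    std.testing.expectEqual(@as(u64, 42), val.castTag(.int_u64).?.data);
    std.testing.expect(val.castTag(.bytes) == null);
}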
@@ -432,7 +432,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { @panic("Attempted to compile for architecture that was disabled by build configuration"); } - const module_fn = typed_value.val.cast(Value.Payload.Function).?.func; + const module_fn = typed_value.val.castTag(.function).?.data; const fn_type = module_fn.owner_decl.typed_value.most_recent.typed_value.ty; @@ -1579,9 +1579,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.func.cast(ir.Inst.Constant)) |func_inst| { - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (inst.func.value()) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -1607,9 +1607,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .riscv64 => { if (info.args.len > 0) return self.fail(inst.base.src, "TODO implement fn args for {}", .{self.target.cpu.arch}); - if (inst.func.cast(ir.Inst.Constant)) |func_inst| { - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (inst.func.value()) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -1631,12 +1631,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, .spu_2 => { - if (inst.func.cast(ir.Inst.Constant)) |func_inst| { + if (inst.func.value()) |func_value| { if (info.args.len != 0) { return self.fail(inst.base.src, "TODO implement call with more than 0 parameters", .{}); } - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; break :blk @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2); @@ -1705,9 +1705,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.func.cast(ir.Inst.Constant)) |func_inst| { - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (inst.func.value()) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { @@ -1766,9 +1766,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.func.cast(ir.Inst.Constant)) |func_inst| { - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (inst.func.value()) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { @@ -1825,9 +1825,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.func.cast(ir.Inst.Constant)) |func_inst| { - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (inst.func.value()) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; 
const text_segment = &macho_file.load_commands.items[macho_file.text_segment_cmd_index.?].Segment; const got = &text_segment.sections.items[macho_file.got_section_index.?]; const got_addr = got.addr + func.owner_decl.link.macho.offset_table_index * @sizeOf(u64); @@ -3223,20 +3223,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const ptr_bytes: u64 = @divExact(ptr_bits, 8); switch (typed_value.ty.zigTypeTag()) { .Pointer => { - if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| { + if (typed_value.val.castTag(.decl_ref)) |payload| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const decl = payload.decl; + const decl = payload.data; const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const decl = payload.decl; + const decl = payload.data; const text_segment = &macho_file.load_commands.items[macho_file.text_segment_cmd_index.?].Segment; const got = &text_segment.sections.items[macho_file.got_section_index.?]; const got_addr = got.addr + decl.link.macho.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const decl = payload.decl; + const decl = payload.data; const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 828061fac6..7cd4479bd9 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -138,25 +138,25 @@ fn renderValue( .undef, .zero => try writer.writeAll("0"), .one => try writer.writeAll("1"), .decl_ref => { - const decl_ref_payload = val.cast(Value.Payload.DeclRef).?; + const decl = val.castTag(.decl_ref).?.data; // Determine if we must pointer cast. 
- const decl_tv = decl_ref_payload.decl.typed_value.most_recent.typed_value; + const decl_tv = decl.typed_value.most_recent.typed_value; if (t.eql(decl_tv.ty)) { - try writer.print("&{s}", .{decl_ref_payload.decl.name}); + try writer.print("&{s}", .{decl.name}); } else { try writer.writeAll("("); try renderType(ctx, writer, t); - try writer.print(")&{s}", .{decl_ref_payload.decl.name}); + try writer.print(")&{s}", .{decl.name}); } }, .function => { - const payload = val.cast(Value.Payload.Function).?; - try writer.print("{s}", .{payload.func.owner_decl.name}); + const func = val.castTag(.function).?.data; + try writer.print("{s}", .{func.owner_decl.name}); }, .extern_fn => { - const payload = val.cast(Value.Payload.ExternFn).?; - try writer.print("{s}", .{payload.decl.name}); + const decl = val.castTag(.extern_fn).?.data; + try writer.print("{s}", .{decl.name}); }, else => |e| return ctx.fail( ctx.decl.src(), @@ -169,7 +169,7 @@ fn renderValue( switch (val.tag()) { .undef, .empty_struct_value, .empty_array => try writer.writeAll("{}"), .bytes => { - const bytes = val.cast(Value.Payload.Bytes).?.data; + const bytes = val.castTag(.bytes).?.data; // TODO: make our own C string escape instead of using {Z} try writer.print("\"{Z}\"", .{bytes}); }, @@ -209,7 +209,7 @@ fn renderFunctionSignature( switch (tv.val.tag()) { .extern_fn => break :blk true, .function => { - const func = tv.val.cast(Value.Payload.Function).?.func; + const func = tv.val.castTag(.function).?.data; break :blk ctx.module.decl_exports.contains(func.owner_decl); }, else => unreachable, @@ -268,13 +268,13 @@ pub fn generate(file: *C, module: *Module, decl: *Decl) !void { ctx.deinit(); } - if (tv.val.cast(Value.Payload.Function)) |func_payload| { + if (tv.val.castTag(.function)) |func_payload| { const writer = file.main.writer(); try renderFunctionSignature(&ctx, writer, decl); try writer.writeAll(" {"); - const func: *Module.Fn = func_payload.func; + const func: *Module.Fn = func_payload.data; const instructions = func.analysis.success.instructions; if (instructions.len > 0) { try writer.writeAll("\n"); @@ -480,10 +480,10 @@ fn genCall(ctx: *Context, file: *C, inst: *Inst.Call) !?[]u8 { const writer = file.main.writer(); const header = file.header.buf.writer(); if (inst.func.castTag(.constant)) |func_inst| { - const fn_decl = if (func_inst.val.cast(Value.Payload.ExternFn)) |extern_fn| - extern_fn.decl - else if (func_inst.val.cast(Value.Payload.Function)) |func_val| - func_val.func.owner_decl + const fn_decl = if (func_inst.val.castTag(.extern_fn)) |extern_fn| + extern_fn.data + else if (func_inst.val.castTag(.function)) |func_payload| + func_payload.data.owner_decl else unreachable; @@ -513,8 +513,8 @@ fn genCall(ctx: *Context, file: *C, inst: *Inst.Call) !?[]u8 { if (i > 0) { try writer.writeAll(", "); } - if (arg.cast(Inst.Constant)) |con| { - try renderValue(ctx, writer, arg.ty, con.val); + if (arg.value()) |val| { + try renderValue(ctx, writer, arg.ty, val); } else { const val = try ctx.resolveInst(arg); try writer.print("{}", .{val}); diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 7297ea1d54..c7ad59f5d1 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -62,7 +62,7 @@ pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void { // Write instructions // TODO: check for and handle death of instructions const tv = decl.typed_value.most_recent.typed_value; - const mod_fn = tv.val.cast(Value.Payload.Function).?.func; + const mod_fn = tv.val.castTag(.function).?.data; for 
(mod_fn.analysis.success.instructions) |inst| try genInst(buf, decl, inst); // Write 'end' opcode @@ -125,8 +125,8 @@ fn genRet(buf: *ArrayList(u8), decl: *Decl, inst: *Inst.UnOp) !void { fn genCall(buf: *ArrayList(u8), decl: *Decl, inst: *Inst.Call) !void { const func_inst = inst.func.castTag(.constant).?; - const func_val = func_inst.val.cast(Value.Payload.Function).?; - const target = func_val.func.owner_decl; + const func = func_inst.val.castTag(.function).?.data; + const target = func.owner_decl; const target_ty = target.typed_value.most_recent.typed_value.ty; if (inst.args.len != 0) return error.TODOImplementMoreWasmCodegen; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 25b883f8c6..4b2b95fc72 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2183,7 +2183,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { for (zir_dumps) |fn_name| { if (mem.eql(u8, mem.spanZ(decl.name), fn_name)) { std.debug.print("\n{}\n", .{decl.name}); - typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*); + typed_value.val.castTag(.function).?.data.dump(module.*); } } } diff --git a/src/llvm_backend.zig b/src/llvm_backend.zig index 77aa4d3bd5..294e6f5400 100644 --- a/src/llvm_backend.zig +++ b/src/llvm_backend.zig @@ -280,7 +280,7 @@ pub const LLVMIRModule = struct { fn gen(self: *LLVMIRModule, module: *Module, typed_value: TypedValue, src: usize) !void { switch (typed_value.ty.zigTypeTag()) { .Fn => { - const func = typed_value.val.cast(Value.Payload.Function).?.func; + const func = typed_value.val.castTag(.function).?.data; const llvm_func = try self.resolveLLVMFunction(func); @@ -314,9 +314,9 @@ pub const LLVMIRModule = struct { } fn genCall(self: *LLVMIRModule, inst: *Inst.Call) !void { - if (inst.func.cast(Inst.Constant)) |func_inst| { - if (func_inst.val.cast(Value.Payload.Function)) |func_val| { - const func = func_val.func; + if (inst.func.value()) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; const zig_fn_type = func.owner_decl.typed_value.most_recent.typed_value.ty; const llvm_fn = try self.resolveLLVMFunction(func); diff --git a/src/type.zig b/src/type.zig index ce237f89c7..9d834a19f2 100644 --- a/src/type.zig +++ b/src/type.zig @@ -733,11 +733,7 @@ pub const Type = extern union { .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), .enum_literal => return Value.initTag(.enum_literal_type), - else => { - const ty_payload = try allocator.create(Value.Payload.Ty); - ty_payload.* = .{ .ty = self }; - return Value.initPayload(&ty_payload.base); - }, + else => return Value.Tag.ty.create(allocator, self), } } @@ -2951,11 +2947,8 @@ pub const Type = extern union { } if ((info.bits - 1) <= std.math.maxInt(u6)) { - const payload = try arena.allocator.create(Value.Payload.Int_i64); - payload.* = .{ - .int = -(@as(i64, 1) << @truncate(u6, info.bits - 1)), - }; - return Value.initPayload(&payload.base); + const n: i64 = -(@as(i64, 1) << @truncate(u6, info.bits - 1)); + return Value.Tag.int_i64.create(&arena.allocator, n); } var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1); @@ -2964,13 +2957,9 @@ pub const Type = extern union { const res_const = res.toConst(); if (res_const.positive) { - const val_payload = try arena.allocator.create(Value.Payload.IntBigPositive); - val_payload.* = .{ .limbs = res_const.limbs }; - return Value.initPayload(&val_payload.base); + return 
Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs);
         } else {
-            const val_payload = try arena.allocator.create(Value.Payload.IntBigNegative);
-            val_payload.* = .{ .limbs = res_const.limbs };
-            return Value.initPayload(&val_payload.base);
+            return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs);
         }
     }
 
@@ -2980,17 +2969,11 @@
         const info = self.intInfo(target);
 
         if (info.signedness == .signed and (info.bits - 1) <= std.math.maxInt(u6)) {
-            const payload = try arena.allocator.create(Value.Payload.Int_i64);
-            payload.* = .{
-                .int = (@as(i64, 1) << @truncate(u6, info.bits - 1)) - 1,
-            };
-            return Value.initPayload(&payload.base);
+            const n: i64 = (@as(i64, 1) << @truncate(u6, info.bits - 1)) - 1;
+            return Value.Tag.int_i64.create(&arena.allocator, n);
         } else if (info.signedness == .signed and info.bits <= std.math.maxInt(u6)) {
-            const payload = try arena.allocator.create(Value.Payload.Int_u64);
-            payload.* = .{
-                .int = (@as(u64, 1) << @truncate(u6, info.bits)) - 1,
-            };
-            return Value.initPayload(&payload.base);
+            const n: u64 = (@as(u64, 1) << @truncate(u6, info.bits)) - 1;
+            return Value.Tag.int_u64.create(&arena.allocator, n);
         }
 
         var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1);
@@ -3003,13 +2986,9 @@
         const res_const = res.toConst();
         if (res_const.positive) {
-            const val_payload = try arena.allocator.create(Value.Payload.IntBigPositive);
-            val_payload.* = .{ .limbs = res_const.limbs };
-            return Value.initPayload(&val_payload.base);
+            return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs);
         } else {
-            const val_payload = try arena.allocator.create(Value.Payload.IntBigNegative);
-            val_payload.* = .{ .limbs = res_const.limbs };
-            return Value.initPayload(&val_payload.base);
+            return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs);
         }
     }
 
diff --git a/src/value.zig b/src/value.zig
index f26c8d8772..91b21511d4 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -84,11 +84,16 @@ pub const Value = extern union {
         function,
         extern_fn,
         variable,
+        /// Represents a pointer to another immutable value.
         ref_val,
+        /// Represents a pointer to a decl, not the value of the decl.
         decl_ref,
         elem_ptr,
+        /// A slice of u8 whose memory is managed externally.
         bytes,
-        repeated, // the value is a value repeated some number of times
+        /// This value is repeated some number of times. The amount of times to repeat
+        /// is stored externally.
+ repeated, float_16, float_32, float_64, @@ -99,6 +104,106 @@ pub const Value = extern union { pub const last_no_payload_tag = Tag.bool_false; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; + + pub fn Type(comptime t: Tag) type { + return switch (t) { + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .usize_type, + .isize_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f128_type, + .c_void_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .null_type, + .undefined_type, + .fn_noreturn_no_args_type, + .fn_void_no_args_type, + .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args_type, + .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type, + .enum_literal_type, + .anyframe_type, + .undef, + .zero, + .one, + .void_value, + .unreachable_value, + .empty_struct_value, + .empty_array, + .null_value, + .bool_true, + .bool_false, + => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), + + .int_big_positive, + .int_big_negative, + => Payload.BigInt, + + .extern_fn, + .decl_ref, + => Payload.Decl, + + .ref_val, + .repeated, + => Payload.SubValue, + + .bytes, + .enum_literal, + => Payload.Bytes, + + .ty => Payload.Ty, + .int_type => Payload.IntType, + .int_u64 => Payload.U64, + .int_i64 => Payload.I64, + .function => Payload.Function, + .variable => Payload.Variable, + .elem_ptr => Payload.ElemPtr, + .float_16 => Payload.Float_16, + .float_32 => Payload.Float_32, + .float_64 => Payload.Float_64, + .float_128 => Payload.Float_128, + .error_set => Payload.ErrorSet, + .@"error" => Payload.Error, + }; + } + + pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Value { + const ptr = try ally.create(t.Type()); + ptr.* = .{ + .base = .{ .tag = t }, + .data = data, + }; + return Value{ .ptr_otherwise = &ptr.base }; + } + + pub fn Data(comptime t: Tag) type { + return std.meta.fieldInfo(t.Type(), "data").field_type; + } }; pub fn initTag(small_tag: Tag) Value { @@ -119,15 +224,36 @@ pub const Value = extern union { } } + /// Prefer `castTag` to this. 
pub fn cast(self: Value, comptime T: type) ?*T {
-        if (self.tag_if_small_enough < Tag.no_payload_count)
+        if (@hasField(T, "base_tag")) {
+            return self.castTag(T.base_tag);
+        }
+        if (self.tag_if_small_enough < Tag.no_payload_count) {
             return null;
+        }
+        inline for (@typeInfo(Tag).Enum.fields) |field| {
+            if (field.value < Tag.no_payload_count)
+                continue;
+            const t = @intToEnum(Tag, field.value);
+            if (self.ptr_otherwise.tag == t) {
+                if (T == t.Type()) {
+                    return @fieldParentPtr(T, "base", self.ptr_otherwise);
+                }
+                return null;
+            }
+        }
+        unreachable;
+    }
 
-        const expected_tag = std.meta.fieldInfo(T, "base").default_value.?.tag;
-        if (self.ptr_otherwise.tag != expected_tag)
+    pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() {
+        if (self.tag_if_small_enough < Tag.no_payload_count)
             return null;
 
-        return @fieldParentPtr(T, "base", self.ptr_otherwise);
+        if (self.ptr_otherwise.tag == t)
+            return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+
+        return null;
     }
 
     pub fn copy(self: Value, allocator: *Allocator) error{OutOfMemory}!Value {
@@ -188,17 +314,17 @@ pub const Value = extern union {
             => unreachable,
 
             .ty => {
-                const payload = @fieldParentPtr(Payload.Ty, "base", self.ptr_otherwise);
+                const payload = self.castTag(.ty).?;
                 const new_payload = try allocator.create(Payload.Ty);
                 new_payload.* = .{
                     .base = payload.base,
-                    .ty = try payload.ty.copy(allocator),
+                    .data = try payload.data.copy(allocator),
                 };
                 return Value{ .ptr_otherwise = &new_payload.base };
             },
             .int_type => return self.copyPayloadShallow(allocator, Payload.IntType),
-            .int_u64 => return self.copyPayloadShallow(allocator, Payload.Int_u64),
-            .int_i64 => return self.copyPayloadShallow(allocator, Payload.Int_i64),
+            .int_u64 => return self.copyPayloadShallow(allocator, Payload.U64),
+            .int_i64 => return self.copyPayloadShallow(allocator, Payload.I64),
             .int_big_positive => {
                 @panic("TODO implement copying of big ints");
             },
@@ -206,35 +332,37 @@ pub const Value = extern union {
                 @panic("TODO implement copying of big ints");
             },
             .function => return self.copyPayloadShallow(allocator, Payload.Function),
-            .extern_fn => return self.copyPayloadShallow(allocator, Payload.ExternFn),
+            .extern_fn => return self.copyPayloadShallow(allocator, Payload.Decl),
             .variable => return self.copyPayloadShallow(allocator, Payload.Variable),
             .ref_val => {
-                const payload = @fieldParentPtr(Payload.RefVal, "base", self.ptr_otherwise);
-                const new_payload = try allocator.create(Payload.RefVal);
+                const payload = self.castTag(.ref_val).?;
+                const new_payload = try allocator.create(Payload.SubValue);
                 new_payload.* = .{
                     .base = payload.base,
-                    .val = try payload.val.copy(allocator),
+                    .data = try payload.data.copy(allocator),
                 };
                 return Value{ .ptr_otherwise = &new_payload.base };
             },
-            .decl_ref => return self.copyPayloadShallow(allocator, Payload.DeclRef),
+            .decl_ref => return self.copyPayloadShallow(allocator, Payload.Decl),
             .elem_ptr => {
-                const payload = @fieldParentPtr(Payload.ElemPtr, "base", self.ptr_otherwise);
+                const payload = self.castTag(.elem_ptr).?;
                 const new_payload = try allocator.create(Payload.ElemPtr);
                 new_payload.* = .{
                     .base = payload.base,
-                    .array_ptr = try payload.array_ptr.copy(allocator),
-                    .index = payload.index,
+                    .data = .{
+                        .array_ptr = try payload.data.array_ptr.copy(allocator),
+                        .index = payload.data.index,
+                    },
                 };
                 return Value{ .ptr_otherwise = &new_payload.base };
             },
             .bytes => return self.copyPayloadShallow(allocator, Payload.Bytes),
             .repeated => {
-                const payload = @fieldParentPtr(Payload.Repeated, "base",
self.ptr_otherwise); - const new_payload = try allocator.create(Payload.Repeated); + const payload = self.castTag(.repeated).?; + const new_payload = try allocator.create(Payload.SubValue); new_payload.* = .{ .base = payload.base, - .val = try payload.val.copy(allocator), + .data = try payload.data.copy(allocator), }; return Value{ .ptr_otherwise = &new_payload.base }; }, @@ -243,7 +371,7 @@ pub const Value = extern union { .float_64 => return self.copyPayloadShallow(allocator, Payload.Float_64), .float_128 => return self.copyPayloadShallow(allocator, Payload.Float_128), .enum_literal => { - const payload = @fieldParentPtr(Payload.Bytes, "base", self.ptr_otherwise); + const payload = self.castTag(.enum_literal).?; const new_payload = try allocator.create(Payload.Bytes); new_payload.* = .{ .base = payload.base, @@ -259,7 +387,7 @@ pub const Value = extern union { } fn copyPayloadShallow(self: Value, allocator: *Allocator, comptime T: type) error{OutOfMemory}!Value { - const payload = @fieldParentPtr(T, "base", self.ptr_otherwise); + const payload = self.cast(T).?; const new_payload = try allocator.create(T); new_payload.* = payload.*; return Value{ .ptr_otherwise = &new_payload.base }; @@ -326,45 +454,45 @@ pub const Value = extern union { .unreachable_value => return out_stream.writeAll("unreachable"), .bool_true => return out_stream.writeAll("true"), .bool_false => return out_stream.writeAll("false"), - .ty => return val.cast(Payload.Ty).?.ty.format("", options, out_stream), + .ty => return val.castTag(.ty).?.data.format("", options, out_stream), .int_type => { - const int_type = val.cast(Payload.IntType).?; + const int_type = val.castTag(.int_type).?.data; return out_stream.print("{}{}", .{ if (int_type.signed) "s" else "u", int_type.bits, }); }, - .int_u64 => return std.fmt.formatIntValue(val.cast(Payload.Int_u64).?.int, "", options, out_stream), - .int_i64 => return std.fmt.formatIntValue(val.cast(Payload.Int_i64).?.int, "", options, out_stream), - .int_big_positive => return out_stream.print("{}", .{val.cast(Payload.IntBigPositive).?.asBigInt()}), - .int_big_negative => return out_stream.print("{}", .{val.cast(Payload.IntBigNegative).?.asBigInt()}), + .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream), + .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), + .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), + .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), .function => return out_stream.writeAll("(function)"), .extern_fn => return out_stream.writeAll("(extern function)"), .variable => return out_stream.writeAll("(variable)"), .ref_val => { - const ref_val = val.cast(Payload.RefVal).?; + const ref_val = val.castTag(.ref_val).?.data; try out_stream.writeAll("&const "); - val = ref_val.val; + val = ref_val; }, .decl_ref => return out_stream.writeAll("(decl ref)"), .elem_ptr => { - const elem_ptr = val.cast(Payload.ElemPtr).?; + const elem_ptr = val.castTag(.elem_ptr).?.data; try out_stream.print("&[{}] ", .{elem_ptr.index}); val = elem_ptr.array_ptr; }, .empty_array => return out_stream.writeAll(".{}"), - .enum_literal => return out_stream.print(".{z}", .{self.cast(Payload.Bytes).?.data}), - .bytes => return out_stream.print("\"{Z}\"", .{self.cast(Payload.Bytes).?.data}), + .enum_literal => return out_stream.print(".{z}", .{self.castTag(.enum_literal).?.data}), + .bytes => return 
out_stream.print("\"{Z}\"", .{self.castTag(.bytes).?.data}), .repeated => { try out_stream.writeAll("(repeated) "); - val = val.cast(Payload.Repeated).?.val; + val = val.castTag(.repeated).?.data; }, - .float_16 => return out_stream.print("{}", .{val.cast(Payload.Float_16).?.val}), - .float_32 => return out_stream.print("{}", .{val.cast(Payload.Float_32).?.val}), - .float_64 => return out_stream.print("{}", .{val.cast(Payload.Float_64).?.val}), - .float_128 => return out_stream.print("{}", .{val.cast(Payload.Float_128).?.val}), + .float_16 => return out_stream.print("{}", .{val.castTag(.float_16).?.data}), + .float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}), + .float_64 => return out_stream.print("{}", .{val.castTag(.float_64).?.data}), + .float_128 => return out_stream.print("{}", .{val.castTag(.float_128).?.data}), .error_set => { - const error_set = val.cast(Payload.ErrorSet).?; + const error_set = val.castTag(.error_set).?.data; try out_stream.writeAll("error{"); var it = error_set.fields.iterator(); while (it.next()) |entry| { @@ -372,21 +500,24 @@ pub const Value = extern union { } return out_stream.writeAll("}"); }, - .@"error" => return out_stream.print("error.{}", .{val.cast(Payload.Error).?.name}), + .@"error" => return out_stream.print("error.{}", .{val.castTag(.@"error").?.data.name}), }; } /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. pub fn toAllocatedBytes(self: Value, allocator: *Allocator) ![]u8 { - if (self.cast(Payload.Bytes)) |bytes| { - return std.mem.dupe(allocator, u8, bytes.data); + if (self.castTag(.bytes)) |payload| { + return std.mem.dupe(allocator, u8, payload.data); } - if (self.cast(Payload.Repeated)) |repeated| { + if (self.castTag(.enum_literal)) |payload| { + return std.mem.dupe(allocator, u8, payload.data); + } + if (self.castTag(.repeated)) |payload| { @panic("TODO implement toAllocatedBytes for this Value tag"); } - if (self.cast(Payload.DeclRef)) |declref| { - const val = try declref.decl.value(); + if (self.castTag(.decl_ref)) |payload| { + const val = try payload.data.value(); return val.toAllocatedBytes(allocator); } unreachable; @@ -395,7 +526,7 @@ pub const Value = extern union { /// Asserts that the value is representable as a type. 
pub fn toType(self: Value, allocator: *Allocator) !Type { return switch (self.tag()) { - .ty => self.cast(Payload.Ty).?.ty, + .ty => self.castTag(.ty).?.data, .u8_type => Type.initTag(.u8), .i8_type => Type.initTag(.i8), .u16_type => Type.initTag(.u16), @@ -439,7 +570,7 @@ pub const Value = extern union { .anyframe_type => Type.initTag(.@"anyframe"), .int_type => { - const payload = self.cast(Payload.IntType).?; + const payload = self.castTag(.int_type).?.data; const new = try allocator.create(Type.Payload.Bits); new.* = .{ .base = .{ @@ -450,7 +581,7 @@ pub const Value = extern union { return Type.initPayload(&new.base); }, .error_set => { - const payload = self.cast(Payload.ErrorSet).?; + const payload = self.castTag(.error_set).?.data; return Type.Tag.error_set.create(allocator, payload.decl); }, @@ -564,10 +695,10 @@ pub const Value = extern union { .bool_true, => return BigIntMutable.init(&space.limbs, 1).toConst(), - .int_u64 => return BigIntMutable.init(&space.limbs, self.cast(Payload.Int_u64).?.int).toConst(), - .int_i64 => return BigIntMutable.init(&space.limbs, self.cast(Payload.Int_i64).?.int).toConst(), - .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt(), - .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt(), + .int_u64 => return BigIntMutable.init(&space.limbs, self.castTag(.int_u64).?.data).toConst(), + .int_i64 => return BigIntMutable.init(&space.limbs, self.castTag(.int_i64).?.data).toConst(), + .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt(), + .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt(), } } @@ -649,10 +780,10 @@ pub const Value = extern union { .bool_true, => return 1, - .int_u64 => return self.cast(Payload.Int_u64).?.int, - .int_i64 => return @intCast(u64, self.cast(Payload.Int_i64).?.int), - .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().to(u64) catch unreachable, - .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().to(u64) catch unreachable, + .int_u64 => return self.castTag(.int_u64).?.data, + .int_i64 => return @intCast(u64, self.castTag(.int_i64).?.data), + .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().to(u64) catch unreachable, + .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().to(u64) catch unreachable, } } @@ -734,10 +865,10 @@ pub const Value = extern union { .bool_true, => return 1, - .int_u64 => return @intCast(i64, self.cast(Payload.Int_u64).?.int), - .int_i64 => return self.cast(Payload.Int_i64).?.int, - .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().to(i64) catch unreachable, - .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().to(i64) catch unreachable, + .int_u64 => return @intCast(i64, self.castTag(.int_u64).?.data), + .int_i64 => return self.castTag(.int_i64).?.data, + .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable, + .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable, } } @@ -753,14 +884,14 @@ pub const Value = extern union { pub fn toFloat(self: Value, comptime T: type) T { return switch (self.tag()) { .float_16 => @panic("TODO soft float"), - .float_32 => @floatCast(T, self.cast(Payload.Float_32).?.val), - .float_64 => @floatCast(T, self.cast(Payload.Float_64).?.val), - .float_128 => @floatCast(T, self.cast(Payload.Float_128).?.val), + .float_32 => @floatCast(T, self.castTag(.float_32).?.data), + 
.float_64 => @floatCast(T, self.castTag(.float_64).?.data), + .float_128 => @floatCast(T, self.castTag(.float_128).?.data), .zero => 0, .one => 1, - .int_u64 => @intToFloat(T, self.cast(Payload.Int_u64).?.int), - .int_i64 => @intToFloat(T, self.cast(Payload.Int_i64).?.int), + .int_u64 => @intToFloat(T, self.castTag(.int_u64).?.data), + .int_i64 => @intToFloat(T, self.castTag(.int_i64).?.data), .int_big_positive, .int_big_negative => @panic("big int to f128"), else => unreachable, @@ -846,15 +977,15 @@ pub const Value = extern union { => return 1, .int_u64 => { - const x = self.cast(Payload.Int_u64).?.int; + const x = self.castTag(.int_u64).?.data; if (x == 0) return 0; return @intCast(usize, std.math.log2(x) + 1); }, .int_i64 => { @panic("TODO implement i64 intBitCountTwosComp"); }, - .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().bitCountTwosComp(), - .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().bitCountTwosComp(), + .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(), + .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(), } } @@ -943,7 +1074,7 @@ pub const Value = extern union { .int_u64 => switch (ty.zigTypeTag()) { .Int => { - const x = self.cast(Payload.Int_u64).?.int; + const x = self.castTag(.int_u64).?.data; if (x == 0) return true; const info = ty.intInfo(target); const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); @@ -954,7 +1085,7 @@ pub const Value = extern union { }, .int_i64 => switch (ty.zigTypeTag()) { .Int => { - const x = self.cast(Payload.Int_i64).?.int; + const x = self.castTag(.int_i64).?.data; if (x == 0) return true; const info = ty.intInfo(target); if (info.signedness == .unsigned and x < 0) @@ -967,7 +1098,7 @@ pub const Value = extern union { .int_big_positive => switch (ty.zigTypeTag()) { .Int => { const info = ty.intInfo(target); - return self.cast(Payload.IntBigPositive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); + return self.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, @@ -975,7 +1106,7 @@ pub const Value = extern union { .int_big_negative => switch (ty.zigTypeTag()) { .Int => { const info = ty.intInfo(target); - return self.cast(Payload.IntBigNegative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); + return self.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, @@ -986,42 +1117,28 @@ pub const Value = extern union { /// Converts an integer or a float to a float. /// Returns `error.Overflow` if the value does not fit in the new type. 
pub fn floatCast(self: Value, allocator: *Allocator, ty: Type, target: Target) !Value { - const dest_bit_count = switch (ty.tag()) { - .comptime_float => 128, - else => ty.floatBits(target), - }; - switch (dest_bit_count) { - 16, 32, 64, 128 => {}, - else => std.debug.panic("TODO float cast bit count {}\n", .{dest_bit_count}), - } - if (ty.isInt()) { - @panic("TODO int to float"); - } - - switch (dest_bit_count) { - 16 => { - @panic("TODO soft float"); - // var res_payload = Value.Payload.Float_16{.val = self.toFloat(f16)}; - // if (!self.eql(Value.initPayload(&res_payload.base))) - // return error.Overflow; - // return Value.initPayload(&res_payload.base).copy(allocator); + switch (ty.tag()) { + .f16 => { + @panic("TODO add __trunctfhf2 to compiler-rt"); + //const res = try Value.Tag.float_16.create(allocator, self.toFloat(f16)); + //if (!self.eql(res)) + // return error.Overflow; + //return res; }, - 32 => { - var res_payload = Value.Payload.Float_32{ .val = self.toFloat(f32) }; - if (!self.eql(Value.initPayload(&res_payload.base))) + .f32 => { + const res = try Value.Tag.float_32.create(allocator, self.toFloat(f32)); + if (!self.eql(res)) return error.Overflow; - return Value.initPayload(&res_payload.base).copy(allocator); + return res; }, - 64 => { - var res_payload = Value.Payload.Float_64{ .val = self.toFloat(f64) }; - if (!self.eql(Value.initPayload(&res_payload.base))) + .f64 => { + const res = try Value.Tag.float_64.create(allocator, self.toFloat(f64)); + if (!self.eql(res)) return error.Overflow; - return Value.initPayload(&res_payload.base).copy(allocator); + return res; }, - 128 => { - const float_payload = try allocator.create(Value.Payload.Float_128); - float_payload.* = .{ .val = self.toFloat(f128) }; - return Value.initPayload(&float_payload.base); + .f128, .comptime_float, .c_longdouble => { + return Value.Tag.float_128.create(allocator, self.toFloat(f128)); }, else => unreachable, } @@ -1102,10 +1219,10 @@ pub const Value = extern union { .one, => false, - .float_16 => @rem(self.cast(Payload.Float_16).?.val, 1) != 0, - .float_32 => @rem(self.cast(Payload.Float_32).?.val, 1) != 0, - .float_64 => @rem(self.cast(Payload.Float_64).?.val, 1) != 0, - // .float_128 => @rem(self.cast(Payload.Float_128).?.val, 1) != 0, + .float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0, + .float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0, + .float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0, + // .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0, .float_128 => @panic("TODO lld: error: undefined symbol: fmodl"), }; } @@ -1182,15 +1299,15 @@ pub const Value = extern union { .bool_true, => .gt, - .int_u64 => std.math.order(lhs.cast(Payload.Int_u64).?.int, 0), - .int_i64 => std.math.order(lhs.cast(Payload.Int_i64).?.int, 0), - .int_big_positive => lhs.cast(Payload.IntBigPositive).?.asBigInt().orderAgainstScalar(0), - .int_big_negative => lhs.cast(Payload.IntBigNegative).?.asBigInt().orderAgainstScalar(0), + .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0), + .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0), + .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0), + .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0), - .float_16 => std.math.order(lhs.cast(Payload.Float_16).?.val, 0), - .float_32 => std.math.order(lhs.cast(Payload.Float_32).?.val, 0), - .float_64 => std.math.order(lhs.cast(Payload.Float_64).?.val, 0), - .float_128 => 
std.math.order(lhs.cast(Payload.Float_128).?.val, 0), + .float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0), + .float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0), + .float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0), + .float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0), }; } @@ -1208,10 +1325,10 @@ pub const Value = extern union { if (lhs_float and rhs_float) { if (lhs_tag == rhs_tag) { return switch (lhs.tag()) { - .float_16 => return std.math.order(lhs.cast(Payload.Float_16).?.val, rhs.cast(Payload.Float_16).?.val), - .float_32 => return std.math.order(lhs.cast(Payload.Float_32).?.val, rhs.cast(Payload.Float_32).?.val), - .float_64 => return std.math.order(lhs.cast(Payload.Float_64).?.val, rhs.cast(Payload.Float_64).?.val), - .float_128 => return std.math.order(lhs.cast(Payload.Float_128).?.val, rhs.cast(Payload.Float_128).?.val), + .float_16 => return std.math.order(lhs.castTag(.float_16).?.data, rhs.castTag(.float_16).?.data), + .float_32 => return std.math.order(lhs.castTag(.float_32).?.data, rhs.castTag(.float_32).?.data), + .float_64 => return std.math.order(lhs.castTag(.float_64).?.data, rhs.castTag(.float_64).?.data), + .float_128 => return std.math.order(lhs.castTag(.float_128).?.data, rhs.castTag(.float_128).?.data), else => unreachable, }; } @@ -1244,8 +1361,8 @@ pub const Value = extern union { if (a.tag() == .void_value or a.tag() == .null_value) { return true; } else if (a.tag() == .enum_literal) { - const a_name = @fieldParentPtr(Payload.Bytes, "base", a.ptr_otherwise).data; - const b_name = @fieldParentPtr(Payload.Bytes, "base", b.ptr_otherwise).data; + const a_name = a.castTag(.enum_literal).?.data; + const b_name = b.castTag(.enum_literal).?.data; return std.mem.eql(u8, a_name, b_name); } } @@ -1313,11 +1430,11 @@ pub const Value = extern union { }, .error_set => { // Payload.decl should be same for all instances of the type. 
- const payload = @fieldParentPtr(Payload.ErrorSet, "base", self.ptr_otherwise); + const payload = self.castTag(.error_set).?.data; std.hash.autoHash(&hasher, payload.decl); }, .int_type => { - const payload = self.cast(Payload.IntType).?; + const payload = self.castTag(.int_type).?.data; var int_payload = Type.Payload.Bits{ .base = .{ .tag = if (payload.signed) .int_signed else .int_unsigned, @@ -1341,25 +1458,29 @@ pub const Value = extern union { .one, .bool_true => std.hash.autoHash(&hasher, @as(u64, 1)), .float_16, .float_32, .float_64, .float_128 => {}, - .enum_literal, .bytes => { - const payload = @fieldParentPtr(Payload.Bytes, "base", self.ptr_otherwise); + .enum_literal => { + const payload = self.castTag(.enum_literal).?; + hasher.update(payload.data); + }, + .bytes => { + const payload = self.castTag(.bytes).?; hasher.update(payload.data); }, .int_u64 => { - const payload = @fieldParentPtr(Payload.Int_u64, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.int); + const payload = self.castTag(.int_u64).?; + std.hash.autoHash(&hasher, payload.data); }, .int_i64 => { - const payload = @fieldParentPtr(Payload.Int_i64, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.int); + const payload = self.castTag(.int_i64).?; + std.hash.autoHash(&hasher, payload.data); }, .repeated => { - const payload = @fieldParentPtr(Payload.Repeated, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.val.hash()); + const payload = self.castTag(.repeated).?; + std.hash.autoHash(&hasher, payload.data.hash()); }, .ref_val => { - const payload = @fieldParentPtr(Payload.RefVal, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.val.hash()); + const payload = self.castTag(.ref_val).?; + std.hash.autoHash(&hasher, payload.data.hash()); }, .int_big_positive, .int_big_negative => { var space: BigIntSpace = undefined; @@ -1379,28 +1500,28 @@ pub const Value = extern union { } }, .elem_ptr => { - const payload = @fieldParentPtr(Payload.ElemPtr, "base", self.ptr_otherwise); + const payload = self.castTag(.elem_ptr).?.data; std.hash.autoHash(&hasher, payload.array_ptr.hash()); std.hash.autoHash(&hasher, payload.index); }, .decl_ref => { - const payload = @fieldParentPtr(Payload.DeclRef, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.decl); + const decl = self.castTag(.decl_ref).?.data; + std.hash.autoHash(&hasher, decl); }, .function => { - const payload = @fieldParentPtr(Payload.Function, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.func); + const func = self.castTag(.function).?.data; + std.hash.autoHash(&hasher, func); }, .extern_fn => { - const payload = @fieldParentPtr(Payload.ExternFn, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.decl); + const decl = self.castTag(.extern_fn).?.data; + std.hash.autoHash(&hasher, decl); }, .variable => { - const payload = @fieldParentPtr(Payload.Variable, "base", self.ptr_otherwise); - std.hash.autoHash(&hasher, payload.variable); + const variable = self.castTag(.variable).?.data; + std.hash.autoHash(&hasher, variable); }, .@"error" => { - const payload = @fieldParentPtr(Payload.Error, "base", self.ptr_otherwise); + const payload = self.castTag(.@"error").?.data; hasher.update(payload.name); std.hash.autoHash(&hasher, payload.value); }, @@ -1483,10 +1604,10 @@ pub const Value = extern union { .empty_struct_value, => unreachable, - .ref_val => self.cast(Payload.RefVal).?.val, - .decl_ref => self.cast(Payload.DeclRef).?.decl.value(), + .ref_val => 
self.castTag(.ref_val).?.data, + .decl_ref => self.castTag(.decl_ref).?.data.value(), .elem_ptr => { - const elem_ptr = self.cast(Payload.ElemPtr).?; + const elem_ptr = self.castTag(.elem_ptr).?.data; const array_val = try elem_ptr.array_ptr.pointerDeref(allocator); return array_val.elemValue(allocator, elem_ptr.index); }, @@ -1570,26 +1691,26 @@ pub const Value = extern union { .empty_array => unreachable, // out of bounds array index - .bytes => { - const int_payload = try allocator.create(Payload.Int_u64); - int_payload.* = .{ .int = self.cast(Payload.Bytes).?.data[index] }; - return Value.initPayload(&int_payload.base); - }, + .bytes => return Tag.int_u64.create(allocator, self.castTag(.bytes).?.data[index]), // No matter the index; all the elements are the same! - .repeated => return self.cast(Payload.Repeated).?.val, + .repeated => return self.castTag(.repeated).?.data, } } /// Returns a pointer to the element value at the index. pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value { - const payload = try allocator.create(Payload.ElemPtr); - if (self.cast(Payload.ElemPtr)) |elem_ptr| { - payload.* = .{ .array_ptr = elem_ptr.array_ptr, .index = elem_ptr.index + index }; - } else { - payload.* = .{ .array_ptr = self, .index = index }; + if (self.castTag(.elem_ptr)) |elem_ptr| { + return Tag.elem_ptr.create(allocator, .{ + .array_ptr = elem_ptr.data.array_ptr, + .index = elem_ptr.data.index + index, + }); } - return Value.initPayload(&payload.base); + + return Tag.elem_ptr.create(allocator, .{ + .array_ptr = self, + .index = index, + }); } pub fn isUndef(self: Value) bool { @@ -1776,131 +1897,128 @@ pub const Value = extern union { pub const Payload = struct { tag: Tag, - pub const Int_u64 = struct { - base: Payload = Payload{ .tag = .int_u64 }, - int: u64, + pub const U64 = struct { + base: Payload, + data: u64, }; - pub const Int_i64 = struct { - base: Payload = Payload{ .tag = .int_i64 }, - int: i64, - }; - - pub const IntBigPositive = struct { - base: Payload = Payload{ .tag = .int_big_positive }, - limbs: []const std.math.big.Limb, - - pub fn asBigInt(self: IntBigPositive) BigIntConst { - return BigIntConst{ .limbs = self.limbs, .positive = true }; - } + pub const I64 = struct { + base: Payload, + data: i64, }; - pub const IntBigNegative = struct { - base: Payload = Payload{ .tag = .int_big_negative }, - limbs: []const std.math.big.Limb, + pub const BigInt = struct { + base: Payload, + data: []const std.math.big.Limb, - pub fn asBigInt(self: IntBigNegative) BigIntConst { - return BigIntConst{ .limbs = self.limbs, .positive = false }; + pub fn asBigInt(self: BigInt) BigIntConst { + const positive = switch (self.base.tag) { + .int_big_positive => true, + .int_big_negative => false, + else => unreachable, + }; + return BigIntConst{ .limbs = self.data, .positive = positive }; } }; pub const Function = struct { - base: Payload = Payload{ .tag = .function }, - func: *Module.Fn, + base: Payload, + data: *Module.Fn, }; - pub const ExternFn = struct { - base: Payload = Payload{ .tag = .extern_fn }, - decl: *Module.Decl, + pub const Decl = struct { + base: Payload, + data: *Module.Decl, }; pub const Variable = struct { - base: Payload = Payload{ .tag = .variable }, - variable: *Module.Var, - }; - - pub const ArraySentinel0_u8_Type = struct { - base: Payload = Payload{ .tag = .array_sentinel_0_u8_type }, - len: u64, - }; - - /// Represents a pointer to another immutable value. 
- pub const RefVal = struct { - base: Payload = Payload{ .tag = .ref_val }, - val: Value, + base: Payload, + data: *Module.Var, }; - /// Represents a pointer to a decl, not the value of the decl. - pub const DeclRef = struct { - base: Payload = Payload{ .tag = .decl_ref }, - decl: *Module.Decl, + pub const SubValue = struct { + base: Payload, + data: Value, }; pub const ElemPtr = struct { - base: Payload = Payload{ .tag = .elem_ptr }, - array_ptr: Value, - index: usize, + pub const base_tag = Tag.elem_ptr; + + base: Payload = Payload{ .tag = base_tag }, + data: struct { + array_ptr: Value, + index: usize, + }, }; pub const Bytes = struct { - base: Payload = Payload{ .tag = .bytes }, + base: Payload, data: []const u8, }; pub const Ty = struct { - base: Payload = Payload{ .tag = .ty }, - ty: Type, + base: Payload, + data: Type, }; pub const IntType = struct { - base: Payload = Payload{ .tag = .int_type }, - bits: u16, - signed: bool, - }; + pub const base_tag = Tag.int_type; - pub const Repeated = struct { - base: Payload = Payload{ .tag = .ty }, - /// This value is repeated some number of times. The amount of times to repeat - /// is stored externally. - val: Value, + base: Payload = Payload{ .tag = base_tag }, + data: struct { + bits: u16, + signed: bool, + }, }; pub const Float_16 = struct { - base: Payload = .{ .tag = .float_16 }, - val: f16, + pub const base_tag = Tag.float_16; + + base: Payload = .{ .tag = base_tag }, + data: f16, }; pub const Float_32 = struct { - base: Payload = .{ .tag = .float_32 }, - val: f32, + pub const base_tag = Tag.float_32; + + base: Payload = .{ .tag = base_tag }, + data: f32, }; pub const Float_64 = struct { - base: Payload = .{ .tag = .float_64 }, - val: f64, + pub const base_tag = Tag.float_64; + + base: Payload = .{ .tag = base_tag }, + data: f64, }; pub const Float_128 = struct { - base: Payload = .{ .tag = .float_128 }, - val: f128, + pub const base_tag = Tag.float_128; + + base: Payload = .{ .tag = base_tag }, + data: f128, }; pub const ErrorSet = struct { - base: Payload = .{ .tag = .error_set }, + pub const base_tag = Tag.error_set; - // TODO revisit this when we have the concept of the error tag type - fields: std.StringHashMapUnmanaged(u16), - decl: *Module.Decl, + base: Payload = .{ .tag = base_tag }, + data: struct { + // TODO revisit this when we have the concept of the error tag type + fields: std.StringHashMapUnmanaged(u16), + decl: *Module.Decl, + }, }; pub const Error = struct { base: Payload = .{ .tag = .@"error" }, - - // TODO revisit this when we have the concept of the error tag type - /// `name` is owned by `Module` and will be valid for the entire - /// duration of the compilation. - name: []const u8, - value: u16, + data: struct { + // TODO revisit this when we have the concept of the error tag type + /// `name` is owned by `Module` and will be valid for the entire + /// duration of the compilation. 
+ name: []const u8, + value: u16, + }, }; }; @@ -1914,15 +2032,24 @@ pub const Value = extern union { test "hash same value different representation" { const zero_1 = Value.initTag(.zero); - var payload_1 = Value.Payload.Int_u64{ .int = 0 }; + var payload_1 = Value.Payload.U64{ + .base = .{ .tag = .int_u64 }, + .data = 0, + }; const zero_2 = Value.initPayload(&payload_1.base); std.testing.expectEqual(zero_1.hash(), zero_2.hash()); - var payload_2 = Value.Payload.Int_i64{ .int = 0 }; + var payload_2 = Value.Payload.I64{ + .base = .{ .tag = .int_i64 }, + .data = 0, + }; const zero_3 = Value.initPayload(&payload_2.base); std.testing.expectEqual(zero_2.hash(), zero_3.hash()); - var payload_3 = Value.Payload.IntBigNegative{ .limbs = &[_]std.math.big.Limb{0} }; + var payload_3 = Value.Payload.BigInt{ + .base = .{ .tag = .int_big_negative }, + .data = &[_]std.math.big.Limb{0}, + }; const zero_4 = Value.initPayload(&payload_3.base); std.testing.expectEqual(zero_3.hash(), zero_4.hash()); } diff --git a/src/zir.zig b/src/zir.zig index bd9ab2c538..21bd4f8435 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -1990,15 +1990,15 @@ const EmitZIR = struct { fn resolveInst(self: *EmitZIR, new_body: ZirBody, inst: *ir.Inst) !*Inst { if (inst.cast(ir.Inst.Constant)) |const_inst| { - const new_inst = if (const_inst.val.cast(Value.Payload.Function)) |func_pl| blk: { - const owner_decl = func_pl.func.owner_decl; + const new_inst = if (const_inst.val.castTag(.function)) |func_pl| blk: { + const owner_decl = func_pl.data.owner_decl; break :blk try self.emitDeclVal(inst.src, mem.spanZ(owner_decl.name)); - } else if (const_inst.val.cast(Value.Payload.DeclRef)) |declref| blk: { - const decl_ref = try self.emitDeclRef(inst.src, declref.decl); + } else if (const_inst.val.castTag(.decl_ref)) |declref| blk: { + const decl_ref = try self.emitDeclRef(inst.src, declref.data); try new_body.instructions.append(decl_ref); break :blk decl_ref; - } else if (const_inst.val.cast(Value.Payload.Variable)) |var_pl| blk: { - const owner_decl = var_pl.variable.owner_decl; + } else if (const_inst.val.castTag(.variable)) |var_pl| blk: { + const owner_decl = var_pl.data.owner_decl; break :blk try self.emitDeclVal(inst.src, mem.spanZ(owner_decl.name)); } else blk: { break :blk (try self.emitTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val })).inst; @@ -2150,13 +2150,13 @@ const EmitZIR = struct { fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Decl { const allocator = &self.arena.allocator; - if (typed_value.val.cast(Value.Payload.DeclRef)) |decl_ref| { - const decl = decl_ref.decl; + if (typed_value.val.castTag(.decl_ref)) |decl_ref| { + const decl = decl_ref.data; return try self.emitUnnamedDecl(try self.emitDeclRef(src, decl)); - } else if (typed_value.val.cast(Value.Payload.Variable)) |variable| { + } else if (typed_value.val.castTag(.variable)) |variable| { return self.emitTypedValue(src, .{ .ty = typed_value.ty, - .val = variable.variable.init, + .val = variable.data.init, }); } if (typed_value.val.isUndef()) { @@ -2215,7 +2215,7 @@ const EmitZIR = struct { return self.emitType(src, ty); }, .Fn => { - const module_fn = typed_value.val.cast(Value.Payload.Function).?.func; + const module_fn = typed_value.val.castTag(.function).?.data; return self.emitFn(module_fn, src, typed_value.ty); }, .Array => { @@ -2248,7 +2248,7 @@ const EmitZIR = struct { else return self.emitPrimitive(src, .@"false"), .EnumLiteral => { - const enum_literal = @fieldParentPtr(Value.Payload.Bytes, "base", 
typed_value.val.ptr_otherwise); + const enum_literal = typed_value.val.castTag(.enum_literal).?; const inst = try self.arena.allocator.create(Inst.Str); inst.* = .{ .base = .{ @@ -2748,9 +2748,8 @@ const EmitZIR = struct { .signed => .@"true", .unsigned => .@"false", }); - const bits_payload = try self.arena.allocator.create(Value.Payload.Int_u64); - bits_payload.* = .{ .int = info.bits }; - const bits = try self.emitComptimeIntVal(src, Value.initPayload(&bits_payload.base)); + const bits_val = try Value.Tag.int_u64.create(&self.arena.allocator, info.bits); + const bits = try self.emitComptimeIntVal(src, bits_val); const inttype_inst = try self.arena.allocator.create(Inst.IntType); inttype_inst.* = .{ .base = .{ @@ -2800,7 +2799,10 @@ const EmitZIR = struct { return self.emitUnnamedDecl(&inst.base); }, .Array => { - var len_pl = Value.Payload.Int_u64{ .int = ty.arrayLen() }; + var len_pl = Value.Payload.U64{ + .base = .{ .tag = .int_u64 }, + .data = ty.arrayLen(), + }; const len = Value.initPayload(&len_pl.base); const inst = if (ty.sentinel()) |sentinel| blk: { diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 8960f3ba4d..2e3cced839 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -364,12 +364,9 @@ fn analyzeInstRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError! const ptr_type = try mod.simplePtrType(scope, inst.base.src, operand.ty, false, .One); if (operand.value()) |val| { - const ref_payload = try scope.arena().create(Value.Payload.RefVal); - ref_payload.* = .{ .val = val }; - return mod.constInst(scope, inst.base.src, .{ .ty = ptr_type, - .val = Value.initPayload(&ref_payload.base), + .val = try Value.Tag.ref_val.create(scope.arena(), val), }); } @@ -480,12 +477,9 @@ fn analyzeInstStr(mod: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerErr errdefer new_decl_arena.deinit(); const arena_bytes = try new_decl_arena.allocator.dupe(u8, str_inst.positionals.bytes); - const bytes_payload = try scope.arena().create(Value.Payload.Bytes); - bytes_payload.* = .{ .data = arena_bytes }; - const new_decl = try mod.createAnonymousDecl(scope, &new_decl_arena, .{ .ty = try Type.Tag.array_u8_sentinel_0.create(scope.arena(), arena_bytes.len), - .val = Value.initPayload(&bytes_payload.base), + .val = try Value.Tag.bytes.create(scope.arena(), arena_bytes), }); return mod.analyzeDeclRef(scope, str_inst.base.src, new_decl); } @@ -779,11 +773,9 @@ fn analyzeInstFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError! 
.analysis = .{ .queued = fn_zir }, .owner_decl = scope.decl().?, }; - const fn_payload = try scope.arena().create(Value.Payload.Function); - fn_payload.* = .{ .func = new_func }; return mod.constInst(scope, fn_inst.base.src, .{ .ty = fn_type, - .val = Value.initPayload(&fn_payload.base), + .val = try Value.Tag.function.create(scope.arena(), new_func), }); } @@ -838,14 +830,17 @@ fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) In const payload = try scope.arena().create(Value.Payload.ErrorSet); payload.* = .{ - .fields = .{}, - .decl = undefined, // populated below + .base = .{ .tag = .error_set }, + .data = .{ + .fields = .{}, + .decl = undefined, // populated below + }, }; - try payload.fields.ensureCapacity(&new_decl_arena.allocator, @intCast(u32, inst.positionals.fields.len)); + try payload.data.fields.ensureCapacity(&new_decl_arena.allocator, @intCast(u32, inst.positionals.fields.len)); for (inst.positionals.fields) |field_name| { const entry = try mod.getErrorValue(field_name); - if (payload.fields.fetchPutAssumeCapacity(entry.key, entry.value)) |prev| { + if (payload.data.fields.fetchPutAssumeCapacity(entry.key, entry.value)) |prev| { return mod.fail(scope, inst.base.src, "duplicate error: '{}'", .{field_name}); } } @@ -854,7 +849,7 @@ fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) In .ty = Type.initTag(.type), .val = Value.initPayload(&payload.base), }); - payload.decl = new_decl; + payload.data.decl = new_decl; return mod.analyzeDeclRef(scope, inst.base.src, new_decl); } @@ -863,14 +858,10 @@ fn analyzeInstMergeErrorSets(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) } fn analyzeInstEnumLiteral(mod: *Module, scope: *Scope, inst: *zir.Inst.EnumLiteral) InnerError!*Inst { - const payload = try scope.arena().create(Value.Payload.Bytes); - payload.* = .{ - .base = .{ .tag = .enum_literal }, - .data = try scope.arena().dupe(u8, inst.positionals.name), - }; + const duped_name = try scope.arena().dupe(u8, inst.positionals.name); return mod.constInst(scope, inst.base.src, .{ .ty = Type.initTag(.enum_literal), - .val = Value.initPayload(&payload.base), + .val = try Value.Tag.enum_literal.create(scope.arena(), duped_name), }); } @@ -989,15 +980,12 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr switch (elem_ty.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { - const len_payload = try scope.arena().create(Value.Payload.Int_u64); - len_payload.* = .{ .int = elem_ty.arrayLen() }; - - const ref_payload = try scope.arena().create(Value.Payload.RefVal); - ref_payload.* = .{ .val = Value.initPayload(&len_payload.base) }; - return mod.constInst(scope, fieldptr.base.src, .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int), - .val = Value.initPayload(&ref_payload.base), + .val = try Value.Tag.ref_val.create( + scope.arena(), + try Value.Tag.int_u64.create(scope.arena(), elem_ty.arrayLen()), + ), }); } else { return mod.fail( @@ -1013,15 +1001,12 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr switch (ptr_child.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { - const len_payload = try scope.arena().create(Value.Payload.Int_u64); - len_payload.* = .{ .int = ptr_child.arrayLen() }; - - const ref_payload = try scope.arena().create(Value.Payload.RefVal); - ref_payload.* = .{ .val = Value.initPayload(&len_payload.base) }; - return mod.constInst(scope, fieldptr.base.src, .{ .ty = 
Type.initTag(.single_const_pointer_to_comptime_int), - .val = Value.initPayload(&ref_payload.base), + .val = try Value.Tag.ref_val.create( + scope.arena(), + try Value.Tag.int_u64.create(scope.arena(), ptr_child.arrayLen()), + ), }); } else { return mod.fail( @@ -1043,21 +1028,12 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr switch (child_type.zigTypeTag()) { .ErrorSet => { // TODO resolve inferred error sets - const entry = if (val.cast(Value.Payload.ErrorSet)) |payload| - (payload.fields.getEntry(field_name) orelse + const entry = if (val.castTag(.error_set)) |payload| + (payload.data.fields.getEntry(field_name) orelse return mod.fail(scope, fieldptr.base.src, "no error named '{}' in '{}'", .{ field_name, child_type })).* else try mod.getErrorValue(field_name); - const error_payload = try scope.arena().create(Value.Payload.Error); - error_payload.* = .{ - .name = entry.key, - .value = entry.value, - }; - - const ref_payload = try scope.arena().create(Value.Payload.RefVal); - ref_payload.* = .{ .val = Value.initPayload(&error_payload.base) }; - const result_type = if (child_type.tag() == .anyerror) try Type.Tag.error_set_single.create(scope.arena(), entry.key) else @@ -1065,7 +1041,13 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr return mod.constInst(scope, fieldptr.base.src, .{ .ty = try mod.simplePtrType(scope, fieldptr.base.src, result_type, false, .One), - .val = Value.initPayload(&ref_payload.base), + .val = try Value.Tag.ref_val.create( + scope.arena(), + try Value.Tag.@"error".create(scope.arena(), .{ + .name = entry.key, + .value = entry.value, + }), + ), }); }, .Struct => { -- cgit v1.2.3 From 4d2919a1eed3a916f87cc5f838c9cd2b23ddef70 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 27 Dec 2020 18:56:33 +0100 Subject: stage2 ARM: implement genCondBr --- src/codegen.zig | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index f978115ebc..a0e7968b97 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -42,6 +42,11 @@ pub const Reloc = union(enum) { /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. rel32: usize, + /// A branch in the ARM instruction set + arm_branch: struct { + pos: usize, + cond: @import("codegen/arm.zig").Condition, + }, }; pub const Result = union(enum) { @@ -653,7 +658,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const mcv = try self.genFuncInst(inst); if (!inst.isUnused()) { - log.debug("{*} => {}", .{ inst, mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.putNoClobber(self.gpa, inst, mcv); } @@ -2016,6 +2020,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; break :reloc reloc; }, + .arm, .armeb => reloc: { + const condition: Condition = switch (cond) { + .register => |reg| blk: { + // cmp reg, 1 + // bne ... 
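+ // cmp subtracts the immediate 1 from reg and only sets the
+ // condition flags; the bne below is then taken exactly when
+ // reg does not hold 1, i.e. when the condition is false and
+ // control must jump to the false branch.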
+ const op = Instruction.Operand.imm(1, 0);
+ writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, reg, op).toU32());
+ break :blk .ne;
+ },
+ else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {}", .{ self.target.cpu.arch, @tagName(cond) }),
+ };
+
+ const reloc = Reloc{
+ .arm_branch = .{
+ .pos = self.code.items.len,
+ .cond = condition,
+ },
+ };
+ try self.code.resize(self.code.items.len + 4);
+ break :reloc reloc;
+ },
else => return self.fail(inst.base.src, "TODO implement condbr {}", .{self.target.cpu.arch}),
};
@@ -2225,6 +2250,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(src, "unable to perform relocation: jump too far", .{});
mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt);
},
+ .arm_branch => |info| {
+ switch (arch) {
+ .arm, .armeb => {
+ const amt = self.code.items.len - (info.pos + 4);
+ if (math.cast(i26, amt)) |delta| {
+ writeInt(u32, self.code.items[info.pos..][0..4], Instruction.b(info.cond, delta).toU32());
+ } else |_| {
+ return self.fail(src, "TODO: enable larger branch offset", .{});
+ }
+ },
+ else => unreachable, // attempting to perform an ARM relocation on a non-ARM target arch
+ }
+ },
}
}
@@ -2278,6 +2316,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Leave the jump offset undefined
block.codegen.relocs.appendAssumeCapacity(.{ .rel32 = self.code.items.len - 4 });
},
+ .arm, .armeb => {
+ try self.code.resize(self.code.items.len + 4);
+ block.codegen.relocs.appendAssumeCapacity(.{
+ .arm_branch = .{
+ .pos = self.code.items.len - 4,
+ .cond = .al,
+ },
+ });
+ },
else => return self.fail(src, "TODO implement brvoid for {}", .{self.target.cpu.arch}),
}
return .none;
-- cgit v1.2.3


From 85e1b47c40c023d8a3d75fadaeeb643a361bbc4d Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Mon, 28 Dec 2020 18:17:22 +0100
Subject: stage2 ARM: implement genCondBr for compare_flags

---
 src/codegen.zig | 45 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 42 insertions(+), 3 deletions(-)

(limited to 'src/codegen.zig')

diff --git a/src/codegen.zig b/src/codegen.zig
index a0e7968b97..4a02a8ffc0 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1277,11 +1277,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

switch (op) {
.add => {
- // TODO runtime safety checks (overflow)
writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.al, dst_reg, dst_reg, operand).toU32());
},
.sub => {
- // TODO runtime safety checks (underflow)
if (lhs_is_dest) {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.sub(.al, dst_reg, dst_reg, operand).toU32());
} else {
@@ -1297,6 +1295,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.not, .xor => {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.eor(.al, dst_reg, dst_reg, operand).toU32());
},
+ .cmp_eq => {
+ writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, dst_reg, operand).toU32());
+ },
else => unreachable, // not a binary instruction
}
}
@@ -1957,6 +1958,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.unsigned => MCValue{ .compare_flags_unsigned = op },
};
},
+ .arm, .armeb => {
+ const lhs = try self.resolveInst(inst.lhs);
+ const rhs = try self.resolveInst(inst.rhs);
+
+ const src_mcv = rhs;
+ const dst_mcv = if (lhs != .register) try self.copyToNewRegister(&inst.base, lhs) else lhs;
+
+ try self.genArmBinOpCode(inst.base.src, dst_mcv.register, src_mcv, true, .cmp_eq);
+ const info = inst.lhs.ty.intInfo(self.target.*);
+ return 
switch (info.signedness) { + .signed => MCValue{ .compare_flags_signed = op }, + .unsigned => MCValue{ .compare_flags_unsigned = op }, + }; + }, else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}), } } @@ -2022,6 +2037,30 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .arm, .armeb => reloc: { const condition: Condition = switch (cond) { + .compare_flags_signed => |cmp_op| blk: { + // Here we map to the opposite condition because the jump is to the false branch. + const condition: Condition = switch (cmp_op) { + .gte => .lt, + .gt => .le, + .neq => .eq, + .lt => .ge, + .lte => .gt, + .eq => .ne, + }; + break :blk condition; + }, + .compare_flags_unsigned => |cmp_op| blk: { + // Here we map to the opposite condition because the jump is to the false branch. + const condition: Condition = switch (cmp_op) { + .gte => .cc, + .gt => .ls, + .neq => .eq, + .lt => .cs, + .lte => .hi, + .eq => .ne, + }; + break :blk condition; + }, .register => |reg| blk: { // cmp reg, 1 // bne ... @@ -2253,7 +2292,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arm_branch => |info| { switch (arch) { .arm, .armeb => { - const amt = self.code.items.len - (info.pos + 4); + const amt = @intCast(i32, self.code.items.len) - @intCast(i32, info.pos + 8); if (math.cast(i26, amt)) |delta| { writeInt(u32, self.code.items[info.pos..][0..4], Instruction.b(info.cond, delta).toU32()); } else |_| { -- cgit v1.2.3 From c52ca0b1780c2865cb0c242cb2f1a397766e6ce8 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Mon, 28 Dec 2020 21:09:48 +0100 Subject: stage2 ARM: implement genSetReg with compare_flags --- src/codegen.zig | 41 ++++++++++++++++++--------------- src/codegen/arm.zig | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 19 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 4a02a8ffc0..66aa32b14e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -658,6 +658,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const mcv = try self.genFuncInst(inst); if (!inst.isUnused()) { + log.debug("{*} => {}", .{ inst, mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.putNoClobber(self.gpa, inst, mcv); } @@ -2039,27 +2040,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const condition: Condition = switch (cond) { .compare_flags_signed => |cmp_op| blk: { // Here we map to the opposite condition because the jump is to the false branch. - const condition: Condition = switch (cmp_op) { - .gte => .lt, - .gt => .le, - .neq => .eq, - .lt => .ge, - .lte => .gt, - .eq => .ne, - }; - break :blk condition; + const condition = Condition.fromCompareOperatorSigned(cmp_op); + break :blk condition.negate(); }, .compare_flags_unsigned => |cmp_op| blk: { // Here we map to the opposite condition because the jump is to the false branch. 
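+ // For example, unsigned `lt` is true when the carry flag is
+ // clear (`cc`), so the jump that skips over the true branch
+ // must use the negated condition `cs`.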
- const condition: Condition = switch (cmp_op) { - .gte => .cc, - .gt => .ls, - .neq => .eq, - .lt => .cs, - .lte => .hi, - .eq => .ne, - }; - break :blk condition; + const condition = Condition.fromCompareOperatorUnsigned(cmp_op); + break :blk condition.negate(); }, .register => |reg| blk: { // cmp reg, 1 @@ -2239,7 +2226,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, .arm, .armeb => { - if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len))) |delta| { + if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32()); } else |err| { return self.fail(src, "TODO: enable larger branch offset", .{}); @@ -2736,6 +2723,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Write the debug undefined value. return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaa }); }, + .compare_flags_unsigned, + .compare_flags_signed, + => |op| { + const condition = switch (mcv) { + .compare_flags_unsigned => Condition.fromCompareOperatorUnsigned(op), + .compare_flags_signed => Condition.fromCompareOperatorSigned(op), + else => unreachable, + }; + + // mov reg, 0 + // moveq reg, 1 + const zero = Instruction.Operand.imm(0, 0); + const one = Instruction.Operand.imm(1, 0); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, zero).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(condition, reg, one).toU32()); + }, .immediate => |x| { if (x > math.maxInt(u32)) return self.fail(src, "ARM registers are 32-bit wide", .{}); diff --git a/src/codegen/arm.zig b/src/codegen/arm.zig index 33ff789648..978c653cb0 100644 --- a/src/codegen/arm.zig +++ b/src/codegen/arm.zig @@ -35,8 +35,74 @@ pub const Condition = enum(u4) { le, /// always al, + + /// Converts a std.math.CompareOperator into a condition flag, + /// i.e. returns the condition that is true iff the result of the + /// comparison is true. Assumes signed comparison + pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition { + return switch (op) { + .gte => .ge, + .gt => .gt, + .neq => .ne, + .lt => .lt, + .lte => .le, + .eq => .eq, + }; + } + + /// Converts a std.math.CompareOperator into a condition flag, + /// i.e. returns the condition that is true iff the result of the + /// comparison is true. 
Assumes unsigned comparison + pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition { + return switch (op) { + .gte => .cs, + .gt => .hi, + .neq => .ne, + .lt => .cc, + .lte => .ls, + .eq => .eq, + }; + } + + /// Returns the condition which is true iff the given condition is + /// false (if such a condition exists) + pub fn negate(cond: Condition) Condition { + return switch (cond) { + .eq => .ne, + .ne => .eq, + .cs => .cc, + .cc => .cs, + .mi => .pl, + .pl => .mi, + .vs => .vc, + .vc => .vs, + .hi => .ls, + .ls => .hi, + .ge => .lt, + .lt => .ge, + .gt => .le, + .le => .gt, + .al => unreachable, + }; + } }; +test "condition from CompareOperator" { + testing.expectEqual(@as(Condition, .eq), Condition.fromCompareOperatorSigned(.eq)); + testing.expectEqual(@as(Condition, .eq), Condition.fromCompareOperatorUnsigned(.eq)); + + testing.expectEqual(@as(Condition, .gt), Condition.fromCompareOperatorSigned(.gt)); + testing.expectEqual(@as(Condition, .hi), Condition.fromCompareOperatorUnsigned(.gt)); + + testing.expectEqual(@as(Condition, .le), Condition.fromCompareOperatorSigned(.lte)); + testing.expectEqual(@as(Condition, .ls), Condition.fromCompareOperatorUnsigned(.lte)); +} + +test "negate condition" { + testing.expectEqual(@as(Condition, .eq), Condition.ne.negate()); + testing.expectEqual(@as(Condition, .ne), Condition.eq.negate()); +} + /// Represents a register in the ARM instruction set architecture pub const Register = enum(u5) { r0, -- cgit v1.2.3 From c5ec096b2ffd42aff6385debe2a412a6db67868f Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 14 Nov 2020 18:25:23 +0100 Subject: stage2 AArch64: add logical (shifted register) instructions --- src/codegen.zig | 23 ++---- src/codegen/aarch64.zig | 200 ++++++++++++++++++++++++++++-------------------- 2 files changed, 125 insertions(+), 98 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 66aa32b14e..9e6de711d4 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2415,19 +2415,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genSetReg(inst.base.src, reg, arg); } - // TODO move this to lib/std/{elf, macho}.zig, etc. 
- const is_syscall_inst = switch (self.bin_file.tag) { - .macho => mem.eql(u8, inst.asm_source, "svc #0x80"), - .elf => mem.eql(u8, inst.asm_source, "svc #0"), - else => |tag| return self.fail(inst.base.src, "TODO implement aarch64 support for other syscall instructions for file format: '{}'", .{tag}), - }; - if (is_syscall_inst) { - const imm16: u16 = switch (self.bin_file.tag) { - .macho => 0x80, - .elf => 0, - else => unreachable, - }; - mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(imm16).toU32()); + if (mem.eql(u8, inst.asm_source, "svc #0")) { + mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x0).toU32()); + } else if (mem.eql(u8, inst.asm_source, "svc #0x80")) { + mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32()); } else { return self.fail(inst.base.src, "TODO implement support for more aarch64 assembly instructions", .{}); } @@ -2876,8 +2867,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // mov r, x0 mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr( reg, + .xzr, .x0, - Instruction.RegisterShift.none(), + Instruction.Shift.none, ).toU32()); // ldr x28, [sp], #16 mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.x28, .{ @@ -2908,8 +2900,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // mov r, x0 mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr( reg, + .xzr, .x0, - Instruction.RegisterShift.none(), + Instruction.Shift.none, ).toU32()); // ldp x0, x28, [sp, #16] mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldp( diff --git a/src/codegen/aarch64.zig b/src/codegen/aarch64.zig index 0e9ad61745..50cdf6a262 100644 --- a/src/codegen/aarch64.zig +++ b/src/codegen/aarch64.zig @@ -200,17 +200,6 @@ test "FloatingPointRegister.toX" { /// Represents an instruction in the AArch64 instruction set pub const Instruction = union(enum) { - OrShiftedRegister: packed struct { - rd: u5, - rn: u5, - imm6: u6, - rm: u5, - n: u1, - shift: u2, - fixed: u5 = 0b01010, - opc: u2 = 0b01, - sf: u1, - }, MoveWideImmediate: packed struct { rd: u5, imm16: u16, @@ -274,10 +263,37 @@ pub const Instruction = union(enum) { NoOperation: packed struct { fixed: u32 = 0b1101010100_0_00_011_0010_0000_000_11111, }, + LogicalShiftedRegister: packed struct { + rd: u5, + rn: u5, + imm6: u6, + rm: u5, + n: u1, + shift: u2, + fixed: u5 = 0b01010, + opc: u2, + sf: u1, + }, + + pub const Shift = struct { + shift: Type = .lsl, + amount: u6 = 0, + + pub const Type = enum(u2) { + lsl, + lsr, + asr, + ror, + }; + + pub const none = Shift{ + .shift = .lsl, + .amount = 0, + }; + }; pub fn toU32(self: Instruction) u32 { return switch (self) { - .OrShiftedRegister => |v| @bitCast(u32, v), .MoveWideImmediate => |v| @bitCast(u32, v), .PCRelativeAddress => |v| @bitCast(u32, v), .LoadStoreRegister => |v| @bitCast(u32, v), @@ -287,68 +303,10 @@ pub const Instruction = union(enum) { .UnconditionalBranchRegister => |v| @bitCast(u32, v), .UnconditionalBranchImmediate => |v| @bitCast(u32, v), .NoOperation => |v| @bitCast(u32, v), + .LogicalShiftedRegister => |v| @bitCast(u32, v), }; } - pub const RegisterShift = struct { - rn: u5, - imm6: u6, - shift: enum(u2) { - Lsl = 0, - Lsr = 1, - Asr = 2, - Ror = 3, - }, - - pub fn none() RegisterShift { - return .{ - .rn = 0b11111, - .imm6 = 0, - .shift = .Lsl, - }; - } - }; - - // Helper functions for assembly syntax functions - - fn orShiftedRegister( - rd: Register, - rm: Register, - shift: 
RegisterShift, - invert: bool, - ) Instruction { - const n: u1 = if (invert) 1 else 0; - switch (rd.size()) { - 32 => { - return Instruction{ - .OrShiftedRegister = .{ - .rd = rd.id(), - .rn = shift.rn, - .imm6 = shift.imm6, - .rm = rm.id(), - .n = n, - .shift = @enumToInt(shift.shift), - .sf = 0, - }, - }; - }, - 64 => { - return Instruction{ - .OrShiftedRegister = .{ - .rd = rd.id(), - .rn = shift.rn, - .imm6 = shift.imm6, - .rm = rm.id(), - .n = n, - .shift = @enumToInt(shift.shift), - .sf = 1, - }, - }; - }, - else => unreachable, // unexpected register size - } - } - fn moveWideImmediate( opc: u2, rd: Register, @@ -671,15 +629,49 @@ pub const Instruction = union(enum) { }; } - // Bitwise (inclusive) OR of a register value - - pub fn orr(rd: Register, rm: Register, shift: RegisterShift) Instruction { - return orShiftedRegister(rd, rm, shift, false); + fn logicalShiftedRegister( + opc: u2, + n: u1, + shift: Shift, + rd: Register, + rn: Register, + rm: Register, + ) Instruction { + switch (rd.size()) { + 32 => { + assert(shift.amount < 32); + return Instruction{ + .LogicalShiftedRegister = .{ + .rd = rd.id(), + .rn = rn.id(), + .imm6 = shift.amount, + .rm = rm.id(), + .n = n, + .shift = @enumToInt(shift.shift), + .opc = opc, + .sf = 0b0, + }, + }; + }, + 64 => { + return Instruction{ + .LogicalShiftedRegister = .{ + .rd = rd.id(), + .rn = rn.id(), + .imm6 = shift.amount, + .rm = rm.id(), + .n = n, + .shift = @enumToInt(shift.shift), + .opc = opc, + .sf = 0b1, + }, + }; + }, + else => unreachable, // unexpected register size + } } - pub fn orn(rd: Register, rm: Register, shift: RegisterShift) Instruction { - return orShiftedRegister(rd, rm, shift, true); - } + // Helper functions for assembly syntax functions // Move wide (immediate) @@ -823,6 +815,40 @@ pub const Instruction = union(enum) { pub fn nop() Instruction { return Instruction{ .NoOperation = {} }; } + + // Logical (shifted register) + + pub fn @"and"(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b00, 0b0, shift, rd, rn, rm); + } + + pub fn bic(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b00, 0b1, shift, rd, rn, rm); + } + + pub fn orr(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b01, 0b0, shift, rd, rn, rm); + } + + pub fn orn(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b01, 0b1, shift, rd, rn, rm); + } + + pub fn eor(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b10, 0b0, shift, rd, rn, rm); + } + + pub fn eon(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b10, 0b1, shift, rd, rn, rm); + } + + pub fn ands(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b11, 0b0, shift, rd, rn, rm); + } + + pub fn bics(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction { + return logicalShiftedRegister(0b11, 0b1, shift, rd, rn, rm); + } }; test "" { @@ -836,15 +862,15 @@ test "serialize instructions" { }; const testcases = [_]Testcase{ - .{ // orr x0 x1 - .inst = Instruction.orr(.x0, .x1, Instruction.RegisterShift.none()), + .{ // orr x0, xzr, x1 + .inst = Instruction.orr(.x0, .xzr, .x1, Instruction.Shift.none), .expected = 0b1_01_01010_00_0_00001_000000_11111_00000, }, - .{ // orn x0 x1 - .inst = Instruction.orn(.x0, .x1, 
Instruction.RegisterShift.none()), + .{ // orn x0, xzr, x1 + .inst = Instruction.orn(.x0, .xzr, .x1, Instruction.Shift.none), .expected = 0b1_01_01010_00_1_00001_000000_11111_00000, }, - .{ // movz x1 #4 + .{ // movz x1, #4 .inst = Instruction.movz(.x1, 4, 0), .expected = 0b1_10_100101_00_0000000000000100_00001, }, @@ -944,6 +970,14 @@ test "serialize instructions" { .inst = Instruction.ldp(.x1, .x2, Register.sp, Instruction.LoadStorePairOffset.post_index(16)), .expected = 0b10_101_0_001_1_0000010_00010_11111_00001, }, + .{ // and x0, x4, x2 + .inst = Instruction.@"and"(.x0, .x4, .x2, .{}), + .expected = 0b1_00_01010_00_0_00010_000000_00100_00000, + }, + .{ // and x0, x4, x2, lsl #0x8 + .inst = Instruction.@"and"(.x0, .x4, .x2, .{ .shift = .lsl, .amount = 0x8 }), + .expected = 0b1_00_01010_00_0_00010_001000_00100_00000, + }, }; for (testcases) |case| { -- cgit v1.2.3 From 1c13ca5a05978011283ff55a586443b10b69fc85 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Thu, 26 Nov 2020 13:19:30 +0100 Subject: stage2: Use {s} instead of {} when formatting strings --- lib/std/meta/trait.zig | 14 +++ src/Cache.zig | 4 +- src/Compilation.zig | 94 +++++++++--------- src/DepTokenizer.zig | 6 +- src/Module.zig | 16 ++-- src/astgen.zig | 8 +- src/codegen.zig | 38 ++++---- src/codegen/c.zig | 15 +-- src/codegen/llvm.zig | 125 ++++++++++++++++++++++++ src/glibc.zig | 26 ++--- src/libc_installation.zig | 32 +++---- src/link.zig | 8 +- src/link/C.zig | 5 +- src/link/Coff.zig | 10 +- src/link/Elf.zig | 18 ++-- src/link/MachO.zig | 22 ++--- src/link/Wasm.zig | 6 +- src/main.zig | 236 +++++++++++++++++++++++----------------------- src/mingw.zig | 4 +- src/musl.zig | 8 +- src/print_env.zig | 2 +- src/print_targets.zig | 4 +- src/stage1.zig | 4 +- src/translate_c.zig | 76 +++++++-------- src/value.zig | 4 +- src/zir.zig | 44 ++++----- src/zir_sema.zig | 34 +++---- 27 files changed, 503 insertions(+), 360 deletions(-) create mode 100644 src/codegen/llvm.zig (limited to 'src/codegen.zig') diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig index 8c8b26cf45..8e54293533 100644 --- a/lib/std/meta/trait.zig +++ b/lib/std/meta/trait.zig @@ -298,6 +298,20 @@ pub fn isNumber(comptime T: type) bool { }; } +pub fn isIntegerNumber(comptime T: type) bool { + return switch (@typeInfo(T)) { + .Int, .ComptimeInt => true, + else => false, + }; +} + +pub fn isFloatingNumber(comptime T: type) bool { + return switch (@typeInfo(T)) { + .Float, .ComptimeFloat => true, + else => false, + }; +} + test "std.meta.trait.isNumber" { const NotANumber = struct { number: u8, diff --git a/src/Cache.zig b/src/Cache.zig index 3d33226f27..03a0d61157 100644 --- a/src/Cache.zig +++ b/src/Cache.zig @@ -549,7 +549,7 @@ pub const Manifest = struct { .target, .target_must_resolve, .prereq => {}, else => |err| { try err.printError(error_buf.writer()); - std.log.err("failed parsing {}: {}", .{ dep_file_basename, error_buf.items }); + std.log.err("failed parsing {s}: {s}", .{ dep_file_basename, error_buf.items }); return error.InvalidDepFile; }, } @@ -561,7 +561,7 @@ pub const Manifest = struct { .prereq => |bytes| try self.addFilePost(bytes), else => |err| { try err.printError(error_buf.writer()); - std.log.err("failed parsing {}: {}", .{ dep_file_basename, error_buf.items }); + std.log.err("failed parsing {s}: {s}", .{ dep_file_basename, error_buf.items }); return error.InvalidDepFile; }, } diff --git a/src/Compilation.zig b/src/Compilation.zig index 33376d46d2..3a9c1ea993 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1475,7 +1475,7 
@@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor // lifetime annotations in the ZIR. var decl_arena = decl.typed_value.most_recent.arena.?.promote(module.gpa); defer decl.typed_value.most_recent.arena.?.* = decl_arena.state; - log.debug("analyze liveness of {}\n", .{decl.name}); + log.debug("analyze liveness of {s}\n", .{decl.name}); try liveness.analyze(module.gpa, &decl_arena.allocator, func.analysis.success); } @@ -1492,7 +1492,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( module.gpa, decl.src(), - "unable to codegen: {}", + "unable to codegen: {s}", .{@errorName(err)}, )); decl.analysis = .codegen_failure_retryable; @@ -1535,7 +1535,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( module.gpa, decl.src(), - "unable to update line number: {}", + "unable to update line number: {s}", .{@errorName(err)}, )); decl.analysis = .codegen_failure_retryable; @@ -1544,50 +1544,50 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .glibc_crt_file => |crt_file| { glibc.buildCRTFile(self, crt_file) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build glibc CRT file: {}", .{@errorName(err)}); + fatal("unable to build glibc CRT file: {s}", .{@errorName(err)}); }; }, .glibc_shared_objects => { glibc.buildSharedObjects(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build glibc shared objects: {}", .{@errorName(err)}); + fatal("unable to build glibc shared objects: {s}", .{@errorName(err)}); }; }, .musl_crt_file => |crt_file| { musl.buildCRTFile(self, crt_file) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build musl CRT file: {}", .{@errorName(err)}); + fatal("unable to build musl CRT file: {s}", .{@errorName(err)}); }; }, .mingw_crt_file => |crt_file| { mingw.buildCRTFile(self, crt_file) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build mingw-w64 CRT file: {}", .{@errorName(err)}); + fatal("unable to build mingw-w64 CRT file: {s}", .{@errorName(err)}); }; }, .windows_import_lib => |index| { const link_lib = self.bin_file.options.system_libs.items()[index].key; mingw.buildImportLib(self, link_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to generate DLL import .lib file: {}", .{@errorName(err)}); + fatal("unable to generate DLL import .lib file: {s}", .{@errorName(err)}); }; }, .libunwind => { libunwind.buildStaticLib(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build libunwind: {}", .{@errorName(err)}); + fatal("unable to build libunwind: {s}", .{@errorName(err)}); }; }, .libcxx => { libcxx.buildLibCXX(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build libcxx: {}", .{@errorName(err)}); + fatal("unable to build libcxx: {s}", .{@errorName(err)}); }; }, .libcxxabi => { libcxx.buildLibCXXABI(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. 
- fatal("unable to build libcxxabi: {}", .{@errorName(err)}); + fatal("unable to build libcxxabi: {s}", .{@errorName(err)}); }; }, .libtsan => { @@ -1611,20 +1611,20 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .libssp => { self.buildOutputFromZig("ssp.zig", .Lib, &self.libssp_static_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build libssp: {}", .{@errorName(err)}); + fatal("unable to build libssp: {s}", .{@errorName(err)}); }; }, .zig_libc => { self.buildOutputFromZig("c.zig", .Lib, &self.libc_static_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to build zig's multitarget libc: {}", .{@errorName(err)}); + fatal("unable to build zig's multitarget libc: {s}", .{@errorName(err)}); }; }, .generate_builtin_zig => { // This Job is only queued up if there is a zig module. self.updateBuiltinZigFile(self.bin_file.options.module.?) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. - fatal("unable to update builtin.zig file: {}", .{@errorName(err)}); + fatal("unable to update builtin.zig file: {s}", .{@errorName(err)}); }; }, .stage1_module => { @@ -1704,11 +1704,11 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult { const out_h_path = try comp.local_cache_directory.join(arena, &[_][]const u8{ tmp_dir_sub_path, cimport_basename, }); - const out_dep_path = try std.fmt.allocPrint(arena, "{}.d", .{out_h_path}); + const out_dep_path = try std.fmt.allocPrint(arena, "{s}.d", .{out_h_path}); try zig_cache_tmp_dir.writeFile(cimport_basename, c_src); if (comp.verbose_cimport) { - log.info("C import source: {}", .{out_h_path}); + log.info("C import source: {s}", .{out_h_path}); } var argv = std.ArrayList([]const u8).init(comp.gpa); @@ -1755,7 +1755,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult { defer tree.deinit(); if (comp.verbose_cimport) { - log.info("C import .d file: {}", .{out_dep_path}); + log.info("C import .d file: {s}", .{out_dep_path}); } const dep_basename = std.fs.path.basename(out_dep_path); @@ -1775,7 +1775,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult { try bos.flush(); man.writeManifest() catch |err| { - log.warn("failed to write cache manifest for C import: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest for C import: {s}", .{@errorName(err)}); }; break :digest digest; @@ -1785,7 +1785,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult { "o", &digest, cimport_zig_basename, }); if (comp.verbose_cimport) { - log.info("C import output: {}\n", .{out_zig_path}); + log.info("C import output: {s}\n", .{out_zig_path}); } return CImportResult{ .out_zig_path = out_zig_path, @@ -1946,7 +1946,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: * child.stderr_behavior = .Inherit; const term = child.spawnAndWait() catch |err| { - return comp.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) }); + return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) }); }; switch (term) { .Exited => |code| { @@ -1974,7 +1974,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: * const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024); const term = child.wait() catch |err| { - return comp.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], 
@errorName(err) }); + return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) }); }; switch (term) { @@ -1982,12 +1982,12 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: * if (code != 0) { // TODO parse clang stderr and turn it into an error message // and then call failCObjWithOwnedErrorMsg - log.err("clang failed with stderr: {}", .{stderr}); + log.err("clang failed with stderr: {s}", .{stderr}); return comp.failCObj(c_object, "clang exited with code {}", .{code}); } }, else => { - log.err("clang terminated with stderr: {}", .{stderr}); + log.err("clang terminated with stderr: {s}", .{stderr}); return comp.failCObj(c_object, "clang terminated unexpectedly", .{}); }, } @@ -1999,7 +1999,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: * try man.addDepFilePost(zig_cache_tmp_dir, dep_basename); // Just to save disk space, we delete the file because it is never needed again. zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| { - log.warn("failed to delete '{}': {}", .{ dep_file_path, @errorName(err) }); + log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }); }; } @@ -2015,7 +2015,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: * try std.fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename); man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when compiling '{}': {}", .{ c_object.src.src_path, @errorName(err) }); + log.warn("failed to write cache manifest when compiling '{s}': {s}", .{ c_object.src.src_path, @errorName(err) }); }; break :blk digest; }; @@ -2034,7 +2034,7 @@ pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) er const s = std.fs.path.sep_str; const rand_int = std.crypto.random.int(u64); if (comp.local_cache_directory.path) |p| { - return std.fmt.allocPrint(arena, "{}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix }); + return std.fmt.allocPrint(arena, "{s}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix }); } else { return std.fmt.allocPrint(arena, "tmp" ++ s ++ "{x}-{s}", .{ rand_int, suffix }); } @@ -2144,7 +2144,7 @@ pub fn addCCArgs( } const mcmodel = comp.bin_file.options.machine_code_model; if (mcmodel != .default) { - try argv.append(try std.fmt.allocPrint(arena, "-mcmodel={}", .{@tagName(mcmodel)})); + try argv.append(try std.fmt.allocPrint(arena, "-mcmodel={s}", .{@tagName(mcmodel)})); } switch (target.os.tag) { @@ -2497,22 +2497,22 @@ fn detectLibCIncludeDirs( const s = std.fs.path.sep_str; const arch_include_dir = try std.fmt.allocPrint( arena, - "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-{}", + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{ zig_lib_dir, arch_name, os_name, abi_name }, ); const generic_include_dir = try std.fmt.allocPrint( arena, - "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{}", + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{s}", .{ zig_lib_dir, generic_name }, ); const arch_os_include_dir = try std.fmt.allocPrint( arena, - "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-any", + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-any", .{ zig_lib_dir, @tagName(target.cpu.arch), os_name }, ); const generic_os_include_dir = try std.fmt.allocPrint( arena, - "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{}-any", + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{s}-any", .{ zig_lib_dir, os_name }, ); @@ -2631,9 +2631,9 @@ fn 
updateBuiltinZigFile(comp: *Compilation, mod: *Module) !void { pub fn dump_argv(argv: []const []const u8) void { for (argv[0 .. argv.len - 1]) |arg| { - std.debug.print("{} ", .{arg}); + std.debug.print("{s} ", .{arg}); } - std.debug.print("{}\n", .{argv[argv.len - 1]}); + std.debug.print("{s}\n", .{argv[argv.len - 1]}); } pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8 { @@ -2653,15 +2653,15 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8 \\pub const arch = Target.current.cpu.arch; \\/// Deprecated \\pub const endian = Target.current.cpu.arch.endian(); - \\pub const output_mode = OutputMode.{}; - \\pub const link_mode = LinkMode.{}; + \\pub const output_mode = OutputMode.{s}; + \\pub const link_mode = LinkMode.{s}; \\pub const is_test = {}; \\pub const single_threaded = {}; - \\pub const abi = Abi.{}; + \\pub const abi = Abi.{s}; \\pub const cpu: Cpu = Cpu{{ - \\ .arch = .{}, - \\ .model = &Target.{}.cpu.{}, - \\ .features = Target.{}.featureSet(&[_]Target.{}.Feature{{ + \\ .arch = .{s}, + \\ .model = &Target.{s}.cpu.{s}, + \\ .features = Target.{s}.featureSet(&[_]Target.{s}.Feature{{ \\ , .{ @tagName(comp.bin_file.options.output_mode), @@ -2692,7 +2692,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8 \\ }}), \\}}; \\pub const os = Os{{ - \\ .tag = .{}, + \\ .tag = .{s}, \\ .version_range = .{{ , .{@tagName(target.os.tag)}, @@ -2778,8 +2778,8 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8 (comp.bin_file.options.skip_linker_dependencies and comp.bin_file.options.parent_compilation_link_libc); try buffer.writer().print( - \\pub const object_format = ObjectFormat.{}; - \\pub const mode = Mode.{}; + \\pub const object_format = ObjectFormat.{s}; + \\pub const mode = Mode.{s}; \\pub const link_libc = {}; \\pub const link_libcpp = {}; \\pub const have_error_return_tracing = {}; @@ -2787,7 +2787,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8 \\pub const position_independent_code = {}; \\pub const position_independent_executable = {}; \\pub const strip_debug_info = {}; - \\pub const code_model = CodeModel.{}; + \\pub const code_model = CodeModel.{s}; \\ , .{ @tagName(comp.bin_file.options.object_format), @@ -3013,7 +3013,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node id_symlink_basename, &prev_digest_buf, ) catch |err| blk: { - log.debug("stage1 {} new_digest={} error: {}", .{ mod.root_pkg.root_src_path, digest, @errorName(err) }); + log.debug("stage1 {} new_digest={} error: {s}", .{ mod.root_pkg.root_src_path, digest, @errorName(err) }); // Handle this as a cache miss. break :blk prev_digest_buf[0..0]; }; @@ -3189,7 +3189,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node // Update the small file with the digest. If it fails we can continue; it only // means that the next invocation will have an unnecessary cache miss. 
const stage1_flags_byte = @bitCast(u8, mod.stage1_flags); - log.debug("stage1 {} final digest={} flags={x}", .{ + log.debug("stage1 {s} final digest={} flags={x}", .{ mod.root_pkg.root_src_path, digest, stage1_flags_byte, }); var digest_plus_flags: [digest.len + 2]u8 = undefined; @@ -3202,11 +3202,11 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node digest_plus_flags, stage1_flags_byte, mod.stage1_flags.have_winmain_crt_startup, }); Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest_plus_flags) catch |err| { - log.warn("failed to save stage1 hash digest file: {}", .{@errorName(err)}); + log.warn("failed to save stage1 hash digest file: {s}", .{@errorName(err)}); }; // Failure here only means an unnecessary cache miss. man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)}); }; // We hang on to this lock so that the output file path can be used without // other processes clobbering it. diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig index 99db6e4b3c..c42583a786 100644 --- a/src/DepTokenizer.zig +++ b/src/DepTokenizer.zig @@ -366,7 +366,7 @@ pub const Token = union(enum) { .incomplete_quoted_prerequisite, .incomplete_target, => |index_and_bytes| { - try writer.print("{} '", .{self.errStr()}); + try writer.print("{s} '", .{self.errStr()}); if (self == .incomplete_target) { const tmp = Token{ .target_must_resolve = index_and_bytes.bytes }; try tmp.resolve(writer); @@ -383,7 +383,7 @@ pub const Token = union(enum) { => |index_and_char| { try writer.writeAll("illegal char "); try printUnderstandableChar(writer, index_and_char.char); - try writer.print(" at position {}: {}", .{ index_and_char.index, self.errStr() }); + try writer.print(" at position {}: {s}", .{ index_and_char.index, self.errStr() }); }, } } @@ -943,7 +943,7 @@ fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void { fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void { var buf: [80]u8 = undefined; - var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len }); + var text = try std.fmt.bufPrint(buf[0..], "{s} {} bytes ", .{ label, bytes.len }); try out.writeAll(text); var i: usize = text.len; const end = 79; diff --git a/src/Module.zig b/src/Module.zig index 5ea78d06d1..e0042d6c4d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -953,7 +953,7 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { self.failed_decls.putAssumeCapacityNoClobber(decl, try Compilation.ErrorMsg.create( self.gpa, decl.src(), - "unable to analyze: {}", + "unable to analyze: {s}", .{@errorName(err)}, )); decl.analysis = .sema_failure_retryable; @@ -1475,7 +1475,7 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { if (zir_module.error_msg) |src_err_msg| { self.failed_files.putAssumeCapacityNoClobber( &root_scope.base, - try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}), + try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{s}", .{src_err_msg.msg}), ); root_scope.status = .unloaded_parse_failure; return error.AnalysisFail; @@ -1581,7 +1581,7 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void decl.src_index = decl_i; if (deleted_decls.remove(decl) == null) { decl.analysis = .sema_failure; - const err_msg = try Compilation.ErrorMsg.create(self.gpa, 
tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name}); + const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{s}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); try self.failed_decls.putNoClobber(self.gpa, decl, err_msg); } else { @@ -1623,7 +1623,7 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void decl.src_index = decl_i; if (deleted_decls.remove(decl) == null) { decl.analysis = .sema_failure; - const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name}); + const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{s}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); try self.failed_decls.putNoClobber(self.gpa, decl, err_msg); } else if (!srcHashEql(decl.contents_hash, contents_hash)) { @@ -1991,7 +1991,7 @@ pub fn analyzeExport( self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create( self.gpa, src, - "exported symbol collision: {}", + "exported symbol collision: {s}", .{symbol_name}, )); // TODO: add a note @@ -2007,7 +2007,7 @@ pub fn analyzeExport( self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create( self.gpa, src, - "unable to export: {}", + "unable to export: {s}", .{@errorName(err)}, )); new_export.status = .failed_retryable; @@ -2277,7 +2277,7 @@ pub fn createAnonymousDecl( ) !*Decl { const name_index = self.getNextAnonNameIndex(); const scope_decl = scope.decl().?; - const name = try std.fmt.allocPrint(self.gpa, "{}__anon_{}", .{ scope_decl.name, name_index }); + const name = try std.fmt.allocPrint(self.gpa, "{s}__anon_{}", .{ scope_decl.name, name_index }); defer self.gpa.free(name); const name_hash = scope.namespace().fullyQualifiedNameHash(name); const src_hash: std.zig.SrcHash = undefined; @@ -2384,7 +2384,7 @@ pub fn analyzeDeref(self: *Module, scope: *Scope, src: usize, ptr: *Inst, ptr_sr pub fn analyzeDeclRefByName(self: *Module, scope: *Scope, src: usize, decl_name: []const u8) InnerError!*Inst { const decl = self.lookupDeclName(scope, decl_name) orelse - return self.fail(scope, src, "decl '{}' not found", .{decl_name}); + return self.fail(scope, src, "decl '{s}' not found", .{decl_name}); return self.analyzeDeclRef(scope, src, decl); } diff --git a/src/astgen.zig b/src/astgen.zig index 3670cb260e..b4e1760368 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -1955,7 +1955,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo error.Overflow => return mod.failNode( scope, &ident.base, - "primitive integer type '{}' exceeds maximum bit width of 65535", + "primitive integer type '{s}' exceeds maximum bit width of 65535", .{ident_name}, ), error.InvalidCharacter => break :integer, @@ -2010,7 +2010,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclValInModule, .{ .decl = decl }, .{})); } - return mod.failNode(scope, &ident.base, "use of undeclared identifier '{}'", .{ident_name}); + return mod.failNode(scope, &ident.base, "use of undeclared identifier '{s}'", .{ident_name}); } fn stringLiteral(mod: *Module, scope: *Scope, str_lit: *ast.Node.OneToken) InnerError!*zir.Inst { @@ -2204,7 +2204,7 @@ fn ensureBuiltinParamCount(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinC return; const s = if (count == 1) "" else "s"; - return mod.failTok(scope, 
call.builtin_token, "expected {} parameter{}, found {}", .{ count, s, call.params_len }); + return mod.failTok(scope, call.builtin_token, "expected {} parameter{s}, found {}", .{ count, s, call.params_len }); } fn simpleCast( @@ -2383,7 +2383,7 @@ fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.Built } else if (mem.eql(u8, builtin_name, "@compileError")) { return compileError(mod, scope, call); } else { - return mod.failTok(scope, call.builtin_token, "invalid builtin function: '{}'", .{builtin_name}); + return mod.failTok(scope, call.builtin_token, "invalid builtin function: '{s}'", .{builtin_name}); } } diff --git a/src/codegen.zig b/src/codegen.zig index 9e6de711d4..6530b687e5 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -228,7 +228,7 @@ pub fn generateSymbol( .fail = try ErrorMsg.create( bin_file.allocator, src, - "TODO implement generateSymbol for type '{}'", + "TODO implement generateSymbol for type '{s}'", .{@tagName(t)}, ), }; @@ -2029,7 +2029,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); break :blk 0x84; }, - else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {}", .{ self.target.cpu.arch, @tagName(cond) }), + else => return self.fail(inst.base.src, "TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), }; self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); const reloc = Reloc{ .rel32 = self.code.items.len }; @@ -2376,11 +2376,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arm, .armeb => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input}); + return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); try self.genSetReg(inst.base.src, reg, arg); } @@ -2393,11 +2393,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.output) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output}); + return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -2406,11 +2406,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input}); + return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. 
input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); try self.genSetReg(inst.base.src, reg, arg); } @@ -2425,11 +2425,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.output) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output}); + return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -2438,11 +2438,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .riscv64 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input}); + return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); try self.genSetReg(inst.base.src, reg, arg); } @@ -2455,11 +2455,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.output) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output}); + return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -2468,11 +2468,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .i386 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input}); + return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); try self.genSetReg(inst.base.src, reg, arg); } @@ -2485,11 +2485,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.output) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output}); + return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name}); + return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3417,7 +3417,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { next_int_reg += 1; } }, - else => return self.fail(src, "TODO implement function parameters of type {}", .{@tagName(ty.zigTypeTag())}), + else => return self.fail(src, "TODO implement function parameters of type {s}", .{@tagName(ty.zigTypeTag())}), } } result.stack_byte_count = next_stack_offset; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c6c29942d9..b97e1590e3 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -235,7 +235,7 @@ fn renderFunctionSignature( try writer.writeAll(", "); } try renderType(ctx, writer, tv.ty.fnParamType(index)); - try writer.print(" arg{}", .{index}); + try writer.print(" arg{d}", .{index}); } } try writer.writeByte(')'); @@ -481,8 +481,9 @@ fn genBinOp(ctx: *Context, file: *C, inst: *Inst.BinOp, operator: []const u8) !? const rhs = try ctx.resolveInst(inst.rhs); const writer = file.main.writer(); const name = try ctx.name(); - try renderTypeAndName(ctx, writer, inst.base.ty, name, .Const); - try writer.print(" = {s} {s} {s};\n", .{ lhs, operator, rhs }); + try writer.writeAll(indentation ++ "const "); + try renderType(ctx, writer, inst.base.ty); + try writer.print(" {s} = {s} " ++ operator ++ " {s};\n", .{ name, lhs, rhs }); return name; } @@ -587,7 +588,7 @@ fn genAsm(ctx: *Context, file: *C, as: *Inst.Assembly) !?[]u8 { const arg = as.args[index]; try writer.writeAll("register "); try renderType(ctx, writer, arg.ty); - try writer.print(" {}_constant __asm__(\"{}\") = ", .{ reg, reg }); + try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg }); // TODO merge constant handling into inst_map as well if (arg.castTag(.constant)) |c| { try renderValue(ctx, writer, arg.ty, c.val); @@ -597,13 +598,13 @@ fn genAsm(ctx: *Context, file: *C, as: *Inst.Assembly) !?[]u8 { if (!gop.found_existing) { return ctx.fail(ctx.decl.src(), "Internal error in C backend: asm argument not found in inst_map", .{}); } - try writer.print("{};\n ", .{gop.entry.value}); + try writer.print("{s};\n ", .{gop.entry.value}); } } else { return ctx.fail(ctx.decl.src(), "TODO non-explicit inline asm regs", .{}); } } - try writer.print("__asm {} (\"{}\"", .{ if (as.is_volatile) @as([]const u8, "volatile") else "", as.asm_source }); + try writer.print("__asm {s} (\"{s}\"", .{ if (as.is_volatile) @as([]const u8, "volatile") else "", as.asm_source }); if (as.output) |o| { return ctx.fail(ctx.decl.src(), "TODO inline asm output", .{}); } @@ -619,7 +620,7 @@ fn genAsm(ctx: *Context, file: *C, as: *Inst.Assembly) !?[]u8 { if (index > 0) { try writer.writeAll(", "); } - try writer.print("\"\"({}_constant)", .{reg}); + try writer.print("\"\"({s}_constant)", .{reg}); } else { // This is blocked by the earlier test unreachable; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig new file mode 100644 index 0000000000..3a1ebada3b --- /dev/null +++ b/src/codegen/llvm.zig @@ -0,0 +1,125 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; + +pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 { + const llvm_arch = switch (target.cpu.arch) { + .arm => "arm", + .armeb => "armeb", + .aarch64 => "aarch64", + .aarch64_be => "aarch64_be", + .aarch64_32 => "aarch64_32", + .arc => "arc", + .avr => 
"avr", + .bpfel => "bpfel", + .bpfeb => "bpfeb", + .hexagon => "hexagon", + .mips => "mips", + .mipsel => "mipsel", + .mips64 => "mips64", + .mips64el => "mips64el", + .msp430 => "msp430", + .powerpc => "powerpc", + .powerpc64 => "powerpc64", + .powerpc64le => "powerpc64le", + .r600 => "r600", + .amdgcn => "amdgcn", + .riscv32 => "riscv32", + .riscv64 => "riscv64", + .sparc => "sparc", + .sparcv9 => "sparcv9", + .sparcel => "sparcel", + .s390x => "s390x", + .tce => "tce", + .tcele => "tcele", + .thumb => "thumb", + .thumbeb => "thumbeb", + .i386 => "i386", + .x86_64 => "x86_64", + .xcore => "xcore", + .nvptx => "nvptx", + .nvptx64 => "nvptx64", + .le32 => "le32", + .le64 => "le64", + .amdil => "amdil", + .amdil64 => "amdil64", + .hsail => "hsail", + .hsail64 => "hsail64", + .spir => "spir", + .spir64 => "spir64", + .kalimba => "kalimba", + .shave => "shave", + .lanai => "lanai", + .wasm32 => "wasm32", + .wasm64 => "wasm64", + .renderscript32 => "renderscript32", + .renderscript64 => "renderscript64", + .ve => "ve", + .spu_2 => return error.LLVMBackendDoesNotSupportSPUMarkII, + }; + // TODO Add a sub-arch for some architectures depending on CPU features. + + const llvm_os = switch (target.os.tag) { + .freestanding => "unknown", + .ananas => "ananas", + .cloudabi => "cloudabi", + .dragonfly => "dragonfly", + .freebsd => "freebsd", + .fuchsia => "fuchsia", + .ios => "ios", + .kfreebsd => "kfreebsd", + .linux => "linux", + .lv2 => "lv2", + .macos => "macosx", + .netbsd => "netbsd", + .openbsd => "openbsd", + .solaris => "solaris", + .windows => "windows", + .haiku => "haiku", + .minix => "minix", + .rtems => "rtems", + .nacl => "nacl", + .cnk => "cnk", + .aix => "aix", + .cuda => "cuda", + .nvcl => "nvcl", + .amdhsa => "amdhsa", + .ps4 => "ps4", + .elfiamcu => "elfiamcu", + .tvos => "tvos", + .watchos => "watchos", + .mesa3d => "mesa3d", + .contiki => "contiki", + .amdpal => "amdpal", + .hermit => "hermit", + .hurd => "hurd", + .wasi => "wasi", + .emscripten => "emscripten", + .uefi => "windows", + .other => "unknown", + }; + + const llvm_abi = switch (target.abi) { + .none => "unknown", + .gnu => "gnu", + .gnuabin32 => "gnuabin32", + .gnuabi64 => "gnuabi64", + .gnueabi => "gnueabi", + .gnueabihf => "gnueabihf", + .gnux32 => "gnux32", + .code16 => "code16", + .eabi => "eabi", + .eabihf => "eabihf", + .android => "android", + .musl => "musl", + .musleabi => "musleabi", + .musleabihf => "musleabihf", + .msvc => "msvc", + .itanium => "itanium", + .cygnus => "cygnus", + .coreclr => "coreclr", + .simulator => "simulator", + .macabi => "macabi", + }; + + return std.fmt.allocPrint(allocator, "{s}-unknown-{s}-{s}", .{ llvm_arch, llvm_os, llvm_abi }); +} diff --git a/src/glibc.zig b/src/glibc.zig index 0c27872720..f69dd11ada 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -72,7 +72,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! errdefer version_table.deinit(gpa); var glibc_dir = zig_lib_dir.openDir("libc" ++ path.sep_str ++ "glibc", .{}) catch |err| { - std.log.err("unable to open glibc dir: {}", .{@errorName(err)}); + std.log.err("unable to open glibc dir: {s}", .{@errorName(err)}); return error.ZigInstallationCorrupt; }; defer glibc_dir.close(); @@ -81,7 +81,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! 
const vers_txt_contents = glibc_dir.readFileAlloc(gpa, "vers.txt", max_txt_size) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - std.log.err("unable to read vers.txt: {}", .{@errorName(err)}); + std.log.err("unable to read vers.txt: {s}", .{@errorName(err)}); return error.ZigInstallationCorrupt; }, }; @@ -91,7 +91,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! const fns_txt_contents = glibc_dir.readFileAlloc(arena, "fns.txt", max_txt_size) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - std.log.err("unable to read fns.txt: {}", .{@errorName(err)}); + std.log.err("unable to read fns.txt: {s}", .{@errorName(err)}); return error.ZigInstallationCorrupt; }, }; @@ -99,7 +99,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! const abi_txt_contents = glibc_dir.readFileAlloc(gpa, "abi.txt", max_txt_size) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - std.log.err("unable to read abi.txt: {}", .{@errorName(err)}); + std.log.err("unable to read abi.txt: {s}", .{@errorName(err)}); return error.ZigInstallationCorrupt; }, }; @@ -116,7 +116,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! } const adjusted_line = line[prefix.len..]; const ver = std.builtin.Version.parse(adjusted_line) catch |err| { - std.log.err("vers.txt:{}: unable to parse glibc version '{}': {}", .{ line_i, line, @errorName(err) }); + std.log.err("vers.txt:{}: unable to parse glibc version '{s}': {s}", .{ line_i, line, @errorName(err) }); return error.ZigInstallationCorrupt; }; try all_versions.append(arena, ver); @@ -136,7 +136,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! return error.ZigInstallationCorrupt; }; const lib = findLib(lib_name) orelse { - std.log.err("fns.txt:{}: unknown library name: {}", .{ line_i, lib_name }); + std.log.err("fns.txt:{}: unknown library name: {s}", .{ line_i, lib_name }); return error.ZigInstallationCorrupt; }; try all_functions.append(arena, .{ @@ -170,15 +170,15 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! return error.ZigInstallationCorrupt; }; const arch_tag = std.meta.stringToEnum(std.Target.Cpu.Arch, arch_name) orelse { - std.log.err("abi.txt:{}: unrecognized arch: '{}'", .{ line_i, arch_name }); + std.log.err("abi.txt:{}: unrecognized arch: '{s}'", .{ line_i, arch_name }); return error.ZigInstallationCorrupt; }; if (!mem.eql(u8, os_name, "linux")) { - std.log.err("abi.txt:{}: expected OS 'linux', found '{}'", .{ line_i, os_name }); + std.log.err("abi.txt:{}: expected OS 'linux', found '{s}'", .{ line_i, os_name }); return error.ZigInstallationCorrupt; } const abi_tag = std.meta.stringToEnum(std.Target.Abi, abi_name) orelse { - std.log.err("abi.txt:{}: unrecognized ABI: '{}'", .{ line_i, abi_name }); + std.log.err("abi.txt:{}: unrecognized ABI: '{s}'", .{ line_i, abi_name }); return error.ZigInstallationCorrupt; }; @@ -211,7 +211,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError! } const version_index = std.fmt.parseInt(u8, version_index_string, 10) catch |err| { // If this happens with legit data, increase the size of the integer type in the struct. 
- std.log.err("abi.txt:{}: unable to parse version: {}", .{ line_i, @errorName(err) }); + std.log.err("abi.txt:{}: unable to parse version: {s}", .{ line_i, @errorName(err) }); return error.ZigInstallationCorrupt; }; @@ -531,7 +531,7 @@ fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList( try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc ++ "glibc" })); try args.append("-I"); - try args.append(try std.fmt.allocPrint(arena, "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-{}", .{ + try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{ comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi), })); @@ -539,7 +539,7 @@ fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList( try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "generic-glibc")); try args.append("-I"); - try args.append(try std.fmt.allocPrint(arena, "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-linux-any", .{ + try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-linux-any", .{ comp.zig_lib_directory.path.?, @tagName(arch), })); @@ -881,7 +881,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void { if (o_directory.handle.createFile(ok_basename, .{})) |file| { file.close(); } else |err| { - std.log.warn("glibc shared objects: failed to mark completion: {}", .{@errorName(err)}); + std.log.warn("glibc shared objects: failed to mark completion: {s}", .{@errorName(err)}); } } diff --git a/src/libc_installation.zig b/src/libc_installation.zig index 0b1029eeb8..cc96146e0b 100644 --- a/src/libc_installation.zig +++ b/src/libc_installation.zig @@ -83,7 +83,7 @@ pub const LibCInstallation = struct { } inline for (fields) |field, i| { if (!found_keys[i].found) { - log.err("missing field: {}\n", .{field.name}); + log.err("missing field: {s}\n", .{field.name}); return error.ParseError; } } @@ -96,18 +96,18 @@ pub const LibCInstallation = struct { return error.ParseError; } if (self.crt_dir == null and !is_darwin) { - log.err("crt_dir may not be empty for {}\n", .{@tagName(Target.current.os.tag)}); + log.err("crt_dir may not be empty for {s}\n", .{@tagName(Target.current.os.tag)}); return error.ParseError; } if (self.msvc_lib_dir == null and is_windows and !is_gnu) { - log.err("msvc_lib_dir may not be empty for {}-{}\n", .{ + log.err("msvc_lib_dir may not be empty for {s}-{s}\n", .{ @tagName(Target.current.os.tag), @tagName(Target.current.abi), }); return error.ParseError; } if (self.kernel32_lib_dir == null and is_windows and !is_gnu) { - log.err("kernel32_lib_dir may not be empty for {}-{}\n", .{ + log.err("kernel32_lib_dir may not be empty for {s}-{s}\n", .{ @tagName(Target.current.os.tag), @tagName(Target.current.abi), }); @@ -128,25 +128,25 @@ pub const LibCInstallation = struct { try out.print( \\# The directory that contains `stdlib.h`. \\# On POSIX-like systems, include directories be found with: `cc -E -Wp,-v -xc /dev/null` - \\include_dir={} + \\include_dir={s} \\ \\# The system-specific include directory. May be the same as `include_dir`. \\# On Windows it's the directory that includes `vcruntime.h`. \\# On POSIX it's the directory that includes `sys/errno.h`. - \\sys_include_dir={} + \\sys_include_dir={s} \\ \\# The directory that contains `crt1.o` or `crt2.o`. \\# On POSIX, can be found with `cc -print-file-name=crt1.o`. \\# Not needed when targeting MacOS. 
- \\crt_dir={} + \\crt_dir={s} \\ \\# The directory that contains `vcruntime.lib`. \\# Only needed when targeting MSVC on Windows. - \\msvc_lib_dir={} + \\msvc_lib_dir={s} \\ \\# The directory that contains `kernel32.lib`. \\# Only needed when targeting MSVC on Windows. - \\kernel32_lib_dir={} + \\kernel32_lib_dir={s} \\ , .{ include_dir, @@ -338,7 +338,7 @@ pub const LibCInstallation = struct { for (searches) |search| { result_buf.shrink(0); - try result_buf.outStream().print("{}\\Include\\{}\\ucrt", .{ search.path, search.version }); + try result_buf.outStream().print("{s}\\Include\\{s}\\ucrt", .{ search.path, search.version }); var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, @@ -384,7 +384,7 @@ pub const LibCInstallation = struct { for (searches) |search| { result_buf.shrink(0); - try result_buf.outStream().print("{}\\Lib\\{}\\ucrt\\{}", .{ search.path, search.version, arch_sub_dir }); + try result_buf.outStream().print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ search.path, search.version, arch_sub_dir }); var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, @@ -439,7 +439,7 @@ pub const LibCInstallation = struct { for (searches) |search| { result_buf.shrink(0); const stream = result_buf.outStream(); - try stream.print("{}\\Lib\\{}\\um\\{}", .{ search.path, search.version, arch_sub_dir }); + try stream.print("{s}\\Lib\\{s}\\um\\{s}", .{ search.path, search.version, arch_sub_dir }); var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, @@ -520,7 +520,7 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 { const allocator = args.allocator; const cc_exe = std.os.getenvZ("CC") orelse default_cc_exe; - const arg1 = try std.fmt.allocPrint(allocator, "-print-file-name={}", .{args.search_basename}); + const arg1 = try std.fmt.allocPrint(allocator, "-print-file-name={s}", .{args.search_basename}); defer allocator.free(arg1); const argv = [_][]const u8{ cc_exe, arg1 }; @@ -584,17 +584,17 @@ fn printVerboseInvocation( if (!verbose) return; if (search_basename) |s| { - std.debug.warn("Zig attempted to find the file '{}' by executing this command:\n", .{s}); + std.debug.warn("Zig attempted to find the file '{s}' by executing this command:\n", .{s}); } else { std.debug.warn("Zig attempted to find the path to native system libc headers by executing this command:\n", .{}); } for (argv) |arg, i| { if (i != 0) std.debug.warn(" ", .{}); - std.debug.warn("{}", .{arg}); + std.debug.warn("{s}", .{arg}); } std.debug.warn("\n", .{}); if (stderr) |s| { - std.debug.warn("Output:\n==========\n{}\n==========\n", .{s}); + std.debug.warn("Output:\n==========\n{s}\n==========\n", .{s}); } } diff --git a/src/link.zig b/src/link.zig index 468f23bffd..92702c7973 100644 --- a/src/link.zig +++ b/src/link.zig @@ -560,9 +560,9 @@ pub const File = struct { const full_out_path_z = try arena.dupeZ(u8, full_out_path); if (base.options.verbose_link) { - std.debug.print("ar rcs {}", .{full_out_path_z}); + std.debug.print("ar rcs {s}", .{full_out_path_z}); for (object_files.items) |arg| { - std.debug.print(" {}", .{arg}); + std.debug.print(" {s}", .{arg}); } std.debug.print("\n", .{}); } @@ -574,11 +574,11 @@ pub const File = struct { if (!base.options.disable_lld_caching) { Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| { - log.warn("failed to save archive hash digest file: {}", .{@errorName(err)}); + log.warn("failed to save archive hash digest file: {s}", 
.{@errorName(err)}); }; man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when archiving: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest when archiving: {s}", .{@errorName(err)}); }; base.lock = man.toOwnedLock(); diff --git a/src/link/C.zig b/src/link/C.zig index 1059e52115..3ac9db717f 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -111,8 +111,11 @@ pub fn flushModule(self: *C, comp: *Compilation) !void { if (self.header.buf.items.len > 0) { try writer.writeByte('\n'); } + if (self.header.items.len > 0) { + try writer.print("{s}\n", .{self.header.items}); + } if (self.constants.items.len > 0) { - try writer.print("{}\n", .{self.constants.items}); + try writer.print("{s}\n", .{self.constants.items}); } if (self.main.items.len > 1) { const last_two = self.main.items[self.main.items.len - 2 ..]; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 096fa2cd0b..a3cb5cc4c7 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -686,7 +686,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { if (need_realloc) { const curr_vaddr = self.getDeclVAddr(decl); const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment); - log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr }); + log.debug("growing {s} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr }); if (vaddr != curr_vaddr) { log.debug(" (writing new offset table entry)\n", .{}); self.offset_table.items[decl.link.coff.offset_table_index] = vaddr; @@ -697,7 +697,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { } } else { const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment); - log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ mem.spanZ(decl.name), vaddr, code.len }); + log.debug("allocated text block for {s} at 0x{x} (size: {Bi})\n", .{ mem.spanZ(decl.name), vaddr, code.len }); errdefer self.freeTextBlock(&decl.link.coff); self.offset_table.items[decl.link.coff.offset_table_index] = vaddr; try self.writeOffsetTableEntry(decl.link.coff.offset_table_index); @@ -880,7 +880,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void { id_symlink_basename, &prev_digest_buf, ) catch |err| blk: { - log.debug("COFF LLD new_digest={} error: {}", .{ digest, @errorName(err) }); + log.debug("COFF LLD new_digest={} error: {s}", .{ digest, @errorName(err) }); // Handle this as a cache miss. break :blk prev_digest_buf[0..0]; }; @@ -1236,11 +1236,11 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void { // Update the file with the digest. If it fails we can continue; it only // means that the next invocation will have an unnecessary cache miss. Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| { - log.warn("failed to save linking hash digest file: {}", .{@errorName(err)}); + log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)}); }; // Again failure here only means an unnecessary cache miss. man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)}); }; // We hang on to this lock so that the output file path can be used without // other processes clobbering it. 
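The pattern above repeats across every linker backend: any formatted argument that is a byte slice (decl names, paths, @errorName and @tagName results) gains the explicit `{s}` specifier, while integers stay `{}` or become `{d}`. A minimal standalone sketch of the rule these diffs follow (illustration only, not part of the patch; the name and address are hypothetical):

    const std = @import("std");

    pub fn main() void {
        const name: []const u8 = "entry.o"; // hypothetical decl name
        const vaddr: u64 = 0x1000;
        // Byte slices need the explicit `s` specifier; with a plain `{}`,
        // std.fmt no longer assumes a []const u8 should print as text.
        std.debug.print("allocated text block for {s} at 0x{x}\n", .{ name, vaddr });
    }
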
diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 4b2b95fc72..2db15ae280 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1362,7 +1362,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { id_symlink_basename, &prev_digest_buf, ) catch |err| blk: { - log.debug("ELF LLD new_digest={} error: {}", .{ digest, @errorName(err) }); + log.debug("ELF LLD new_digest={} error: {s}", .{ digest, @errorName(err) }); // Handle this as a cache miss. break :blk prev_digest_buf[0..0]; }; @@ -1396,7 +1396,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { if (self.base.options.output_mode == .Exe) { try argv.append("-z"); - try argv.append(try std.fmt.allocPrint(arena, "stack-size={}", .{stack_size})); + try argv.append(try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size})); } if (self.base.options.image_base_override) |image_base| { @@ -1438,7 +1438,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { if (getLDMOption(target)) |ldm| { // Any target ELF will use the freebsd osabi if suffixed with "_fbsd". const arg = if (target.os.tag == .freebsd) - try std.fmt.allocPrint(arena, "{}_fbsd", .{ldm}) + try std.fmt.allocPrint(arena, "{s}_fbsd", .{ldm}) else ldm; try argv.append("-m"); @@ -1599,7 +1599,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { // (the check for that needs to be earlier), but they could be full paths to .so files, in which // case we want to avoid prepending "-l". const ext = Compilation.classifyFileExt(link_lib); - const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib}); + const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{s}", .{link_lib}); argv.appendAssumeCapacity(arg); } @@ -1733,11 +1733,11 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { // Update the file with the digest. If it fails we can continue; it only // means that the next invocation will have an unnecessary cache miss. Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| { - log.warn("failed to save linking hash digest file: {}", .{@errorName(err)}); + log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)}); }; // Again failure here only means an unnecessary cache miss. man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)}); }; // We hang on to this lock so that the output file path can be used without // other processes clobbering it. 
@@ -2082,10 +2082,10 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void { try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1); if (self.local_symbol_free_list.popOrNull()) |i| { - log.debug("reusing symbol index {} for {}\n", .{ i, decl.name }); + log.debug("reusing symbol index {} for {s}\n", .{ i, decl.name }); decl.link.elf.local_sym_index = i; } else { - log.debug("allocating symbol index {} for {}\n", .{ self.local_symbols.items.len, decl.name }); + log.debug("allocating symbol index {} for {s}\n", .{ self.local_symbols.items.len, decl.name }); decl.link.elf.local_sym_index = @intCast(u32, self.local_symbols.items.len); _ = self.local_symbols.addOneAssumeCapacity(); } @@ -2182,7 +2182,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { if (zir_dumps.len != 0) { for (zir_dumps) |fn_name| { if (mem.eql(u8, mem.spanZ(decl.name), fn_name)) { - std.debug.print("\n{}\n", .{decl.name}); + std.debug.print("\n{s}\n", .{decl.name}); typed_value.val.castTag(.function).?.data.dump(module.*); } } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 6abbae2c26..d4a61c8149 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -520,7 +520,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void { id_symlink_basename, &prev_digest_buf, ) catch |err| blk: { - log.debug("MachO LLD new_digest={} error: {}", .{ digest, @errorName(err) }); + log.debug("MachO LLD new_digest={} error: {s}", .{ digest, @errorName(err) }); // Handle this as a cache miss. break :blk prev_digest_buf[0..0]; }; @@ -706,7 +706,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void { // (the check for that needs to be earlier), but they could be full paths to .dylib files, in which // case we want to avoid prepending "-l". const ext = Compilation.classifyFileExt(link_lib); - const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib}); + const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{s}", .{link_lib}); argv.appendAssumeCapacity(arg); } @@ -759,15 +759,15 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void { self.base.allocator.free(result.stderr); } if (result.stdout.len != 0) { - log.warn("unexpected LD stdout: {}", .{result.stdout}); + log.warn("unexpected LD stdout: {s}", .{result.stdout}); } if (result.stderr.len != 0) { - log.warn("unexpected LD stderr: {}", .{result.stderr}); + log.warn("unexpected LD stderr: {s}", .{result.stderr}); } if (result.term != .Exited or result.term.Exited != 0) { // TODO parse this output and surface with the Compilation API rather than // directly outputting to stderr here. - log.err("{}", .{result.stderr}); + log.err("{s}", .{result.stderr}); return error.LDReportedFailure; } } else { @@ -980,11 +980,11 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void { // Update the file with the digest. If it fails we can continue; it only // means that the next invocation will have an unnecessary cache miss. Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| { - log.warn("failed to save linking hash digest file: {}", .{@errorName(err)}); + log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)}); }; // Again failure here only means an unnecessary cache miss. 
man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)}); }; // We hang on to this lock so that the output file path can be used without // other processes clobbering it. @@ -1088,10 +1088,10 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1); if (self.local_symbol_free_list.popOrNull()) |i| { - log.debug("reusing symbol index {} for {}", .{ i, decl.name }); + log.debug("reusing symbol index {d} for {s}", .{ i, decl.name }); decl.link.macho.local_sym_index = i; } else { - log.debug("allocating symbol index {} for {}", .{ self.local_symbols.items.len, decl.name }); + log.debug("allocating symbol index {d} for {s}", .{ self.local_symbols.items.len, decl.name }); decl.link.macho.local_sym_index = @intCast(u32, self.local_symbols.items.len); _ = self.local_symbols.addOneAssumeCapacity(); } @@ -1165,7 +1165,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); if (need_realloc) { const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr }); + log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr }); if (vaddr != symbol.n_value) { symbol.n_value = vaddr; log.debug(" (writing new offset table entry)", .{}); @@ -1188,7 +1188,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { const decl_name = mem.spanZ(decl.name); const name_str_index = try self.makeString(decl_name); const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment); - log.debug("allocated text block for {} at 0x{x}", .{ decl_name, addr }); + log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr }); errdefer self.freeTextBlock(&decl.link.macho); symbol.* = .{ diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 374fa2230b..cbb3e83147 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -321,7 +321,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void { id_symlink_basename, &prev_digest_buf, ) catch |err| blk: { - log.debug("WASM LLD new_digest={} error: {}", .{ digest, @errorName(err) }); + log.debug("WASM LLD new_digest={} error: {s}", .{ digest, @errorName(err) }); // Handle this as a cache miss. break :blk prev_digest_buf[0..0]; }; @@ -463,11 +463,11 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void { // Update the file with the digest. If it fails we can continue; it only // means that the next invocation will have an unnecessary cache miss. Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| { - log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)}); + log.warn("failed to save linking hash digest symlink: {s}", .{@errorName(err)}); }; // Again failure here only means an unnecessary cache miss. man.writeManifest() catch |err| { - log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)}); + log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)}); }; // We hang on to this lock so that the output file path can be used without // other processes clobbering it. 
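All of the LLD-based backends share the same non-fatal cache bookkeeping seen here: a failed digest or manifest write is logged and swallowed, since it only costs a cache miss on the next invocation. A sketch of that pattern in isolation, with a hypothetical saveDigest standing in for Cache.writeSmallFile:

    const std = @import("std");
    const log = std.log;

    fn saveDigest() !void {
        return error.AccessDenied; // stand-in failure for demonstration
    }

    pub fn main() void {
        // The error is downgraded to a warning; `{s}` is required because
        // @errorName returns a []const u8.
        saveDigest() catch |err| {
            log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)});
        };
    }
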
diff --git a/src/main.zig b/src/main.zig index 98b19cc3ea..7d03ea956f 100644 --- a/src/main.zig +++ b/src/main.zig @@ -118,7 +118,7 @@ pub fn main() anyerror!void { pub fn mainArgs(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void { if (args.len <= 1) { - std.log.info("{}", .{usage}); + std.log.info("{s}", .{usage}); fatal("expected command argument", .{}); } @@ -204,8 +204,8 @@ pub fn mainArgs(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v } else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) { try io.getStdOut().writeAll(usage); } else { - std.log.info("{}", .{usage}); - fatal("unknown command: {}", .{args[1]}); + std.log.info("{s}", .{usage}); + fatal("unknown command: {s}", .{args[1]}); } } @@ -615,7 +615,7 @@ fn buildOutputType( fatal("unexpected end-of-parameter mark: --", .{}); } } else if (mem.eql(u8, arg, "--pkg-begin")) { - if (i + 2 >= args.len) fatal("Expected 2 arguments after {}", .{arg}); + if (i + 2 >= args.len) fatal("Expected 2 arguments after {s}", .{arg}); i += 1; const pkg_name = args[i]; i += 1; @@ -635,7 +635,7 @@ fn buildOutputType( cur_pkg = cur_pkg.parent orelse fatal("encountered --pkg-end with no matching --pkg-begin", .{}); } else if (mem.eql(u8, arg, "--main-pkg-path")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; main_pkg_path = args[i]; } else if (mem.eql(u8, arg, "-cflags")) { @@ -653,10 +653,10 @@ fn buildOutputType( i += 1; const next_arg = args[i]; color = std.meta.stringToEnum(Color, next_arg) orelse { - fatal("expected [auto|on|off] after --color, found '{}'", .{next_arg}); + fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg}); }; } else if (mem.eql(u8, arg, "--subsystem")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; if (mem.eql(u8, args[i], "console")) { subsystem = .Console; @@ -689,51 +689,51 @@ fn buildOutputType( }); } } else if (mem.eql(u8, arg, "-O")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; optimize_mode_string = args[i]; } else if (mem.eql(u8, arg, "--stack")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; stack_size_override = std.fmt.parseUnsigned(u64, args[i], 0) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "--image-base")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; image_base_override = std.fmt.parseUnsigned(u64, args[i], 0) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "--name")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; provided_name = args[i]; } else if (mem.eql(u8, arg, "-rpath")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; try rpath_list.append(args[i]); } else if (mem.eql(u8, arg, "--library-directory") or mem.eql(u8, arg, "-L")) { - if (i + 1 >= args.len) 
fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; try lib_dirs.append(args[i]); } else if (mem.eql(u8, arg, "-F")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; try framework_dirs.append(args[i]); } else if (mem.eql(u8, arg, "-framework")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; try frameworks.append(args[i]); } else if (mem.eql(u8, arg, "-T") or mem.eql(u8, arg, "--script")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; linker_script = args[i]; } else if (mem.eql(u8, arg, "--version-script")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; version_script = args[i]; } else if (mem.eql(u8, arg, "--library") or mem.eql(u8, arg, "-l")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); // We don't know whether this library is part of libc or libc++ until we resolve the target. // So we simply append to the list for now. i += 1; @@ -743,7 +743,7 @@ fn buildOutputType( mem.eql(u8, arg, "-I") or mem.eql(u8, arg, "-dirafter")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; try clang_argv.append(arg); try clang_argv.append(args[i]); @@ -753,19 +753,19 @@ fn buildOutputType( } i += 1; version = std.builtin.Version.parse(args[i]) catch |err| { - fatal("unable to parse --version '{}': {}", .{ args[i], @errorName(err) }); + fatal("unable to parse --version '{s}': {s}", .{ args[i], @errorName(err) }); }; have_version = true; } else if (mem.eql(u8, arg, "-target")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; target_arch_os_abi = args[i]; } else if (mem.eql(u8, arg, "-mcpu")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; target_mcpu = args[i]; } else if (mem.eql(u8, arg, "-mcmodel")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; machine_code_model = parseCodeModel(args[i]); } else if (mem.startsWith(u8, arg, "-ofmt=")) { @@ -777,35 +777,35 @@ fn buildOutputType( } else if (mem.startsWith(u8, arg, "-O")) { optimize_mode_string = arg["-O".len..]; } else if (mem.eql(u8, arg, "--dynamic-linker")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; target_dynamic_linker = args[i]; } else if (mem.eql(u8, arg, "--libc")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; libc_paths_file = args[i]; } else if (mem.eql(u8, arg, "--test-filter")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; 
test_filter = args[i]; } else if (mem.eql(u8, arg, "--test-name-prefix")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; test_name_prefix = args[i]; } else if (mem.eql(u8, arg, "--test-cmd")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; try test_exec_args.append(args[i]); } else if (mem.eql(u8, arg, "--cache-dir")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; override_local_cache_dir = args[i]; } else if (mem.eql(u8, arg, "--global-cache-dir")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; override_global_cache_dir = args[i]; } else if (mem.eql(u8, arg, "--override-lib-dir")) { - if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); + if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg}); i += 1; override_lib_dir = args[i]; } else if (mem.eql(u8, arg, "-fcompiler-rt")) { @@ -968,7 +968,7 @@ fn buildOutputType( { try clang_argv.append(arg); } else { - fatal("unrecognized parameter: '{}'", .{arg}); + fatal("unrecognized parameter: '{s}'", .{arg}); } } else switch (Compilation.classifyFileExt(arg)) { .object, .static_library, .shared_library => { @@ -982,19 +982,19 @@ fn buildOutputType( }, .zig, .zir => { if (root_src_file) |other| { - fatal("found another zig file '{}' after root source file '{}'", .{ arg, other }); + fatal("found another zig file '{s}' after root source file '{s}'", .{ arg, other }); } else { root_src_file = arg; } }, .unknown => { - fatal("unrecognized file extension of parameter '{}'", .{arg}); + fatal("unrecognized file extension of parameter '{s}'", .{arg}); }, } } if (optimize_mode_string) |s| { optimize_mode = std.meta.stringToEnum(std.builtin.Mode, s) orelse - fatal("unrecognized optimization mode: '{}'", .{s}); + fatal("unrecognized optimization mode: '{s}'", .{s}); } }, .cc, .cpp => { @@ -1018,7 +1018,7 @@ fn buildOutputType( var it = ClangArgIterator.init(arena, all_args); while (it.has_next) { it.next() catch |err| { - fatal("unable to parse command line parameters: {}", .{@errorName(err)}); + fatal("unable to parse command line parameters: {s}", .{@errorName(err)}); }; switch (it.zig_equivalent) { .target => target_arch_os_abi = it.only_arg, // example: -target riscv64-linux-unknown @@ -1038,7 +1038,7 @@ fn buildOutputType( }, .zig, .zir => { if (root_src_file) |other| { - fatal("found another zig file '{}' after root source file '{}'", .{ it.only_arg, other }); + fatal("found another zig file '{s}' after root source file '{s}'", .{ it.only_arg, other }); } else { root_src_file = it.only_arg; } @@ -1153,7 +1153,7 @@ fn buildOutputType( if (mem.eql(u8, arg, "-soname")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } const name = linker_args.items[i]; soname = .{ .yes = name }; @@ -1185,7 +1185,7 @@ fn buildOutputType( } else if (mem.eql(u8, arg, "-rpath")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } try rpath_list.append(linker_args.items[i]); } else if (mem.eql(u8, arg, "-I") or @@ -1194,7 +1194,7 @@ fn 
buildOutputType( { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } target_dynamic_linker = linker_args.items[i]; } else if (mem.eql(u8, arg, "-E") or @@ -1205,7 +1205,7 @@ fn buildOutputType( } else if (mem.eql(u8, arg, "--version-script")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } version_script = linker_args.items[i]; } else if (mem.startsWith(u8, arg, "-O")) { @@ -1227,7 +1227,7 @@ fn buildOutputType( } else if (mem.eql(u8, arg, "-z")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } const z_arg = linker_args.items[i]; if (mem.eql(u8, z_arg, "nodelete")) { @@ -1235,44 +1235,44 @@ fn buildOutputType( } else if (mem.eql(u8, z_arg, "defs")) { linker_z_defs = true; } else { - warn("unsupported linker arg: -z {}", .{z_arg}); + warn("unsupported linker arg: -z {s}", .{z_arg}); } } else if (mem.eql(u8, arg, "--major-image-version")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } version.major = std.fmt.parseUnsigned(u32, linker_args.items[i], 10) catch |err| { - fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); + fatal("unable to parse '{s}': {s}", .{ arg, @errorName(err) }); }; have_version = true; } else if (mem.eql(u8, arg, "--minor-image-version")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } version.minor = std.fmt.parseUnsigned(u32, linker_args.items[i], 10) catch |err| { - fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); + fatal("unable to parse '{s}': {s}", .{ arg, @errorName(err) }); }; have_version = true; } else if (mem.eql(u8, arg, "--stack")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } stack_size_override = std.fmt.parseUnsigned(u64, linker_args.items[i], 0) catch |err| { - fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); + fatal("unable to parse '{s}': {s}", .{ arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "--image-base")) { i += 1; if (i >= linker_args.items.len) { - fatal("expected linker arg after '{}'", .{arg}); + fatal("expected linker arg after '{s}'", .{arg}); } image_base_override = std.fmt.parseUnsigned(u64, linker_args.items[i], 0) catch |err| { - fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); + fatal("unable to parse '{s}': {s}", .{ arg, @errorName(err) }); }; } else { - warn("unsupported linker arg: {}", .{arg}); + warn("unsupported linker arg: {s}", .{arg}); } } @@ -1328,7 +1328,7 @@ fn buildOutputType( } if (arg_mode == .translate_c and c_source_files.items.len != 1) { - fatal("translate-c expects exactly 1 source file (found {})", .{c_source_files.items.len}); + fatal("translate-c expects exactly 1 source file (found {d})", .{c_source_files.items.len}); } if (root_src_file == null and arg_mode == .zig_test) { @@ -1373,25 +1373,25 @@ fn buildOutputType( help: { var help_text = std.ArrayList(u8).init(arena); for (diags.arch.?.allCpuModels()) |cpu| { - help_text.writer().print(" {}\n", .{cpu.name}) catch break :help; + help_text.writer().print(" {s}\n", .{cpu.name}) catch break :help; } - 
std.log.info("Available CPUs for architecture '{}': {}", .{ + std.log.info("Available CPUs for architecture '{s}': {s}", .{ @tagName(diags.arch.?), help_text.items, }); } - fatal("Unknown CPU: '{}'", .{diags.cpu_name.?}); + fatal("Unknown CPU: '{s}'", .{diags.cpu_name.?}); }, error.UnknownCpuFeature => { help: { var help_text = std.ArrayList(u8).init(arena); for (diags.arch.?.allFeaturesList()) |feature| { - help_text.writer().print(" {}: {}\n", .{ feature.name, feature.description }) catch break :help; + help_text.writer().print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help; } - std.log.info("Available CPU features for architecture '{}': {}", .{ + std.log.info("Available CPU features for architecture '{s}': {s}", .{ @tagName(diags.arch.?), help_text.items, }); } - fatal("Unknown CPU feature: '{}'", .{diags.unknown_feature_name}); + fatal("Unknown CPU feature: '{s}'", .{diags.unknown_feature_name}); }, else => |e| return e, }; @@ -1431,10 +1431,10 @@ fn buildOutputType( if (cross_target.isNativeOs() and (system_libs.items.len != 0 or want_native_include_dirs)) { const paths = std.zig.system.NativePaths.detect(arena) catch |err| { - fatal("unable to detect native system paths: {}", .{@errorName(err)}); + fatal("unable to detect native system paths: {s}", .{@errorName(err)}); }; for (paths.warnings.items) |warning| { - warn("{}", .{warning}); + warn("{s}", .{warning}); } const has_sysroot = if (comptime std.Target.current.isDarwin()) outer: { @@ -1492,7 +1492,7 @@ fn buildOutputType( } else if (mem.eql(u8, ofmt, "raw")) { break :blk .raw; } else { - fatal("unsupported object format: {}", .{ofmt}); + fatal("unsupported object format: {s}", .{ofmt}); } }; @@ -1562,7 +1562,7 @@ fn buildOutputType( } if (fs.path.dirname(full_path)) |dirname| { const handle = fs.cwd().openDir(dirname, .{}) catch |err| { - fatal("unable to open output directory '{}': {}", .{ dirname, @errorName(err) }); + fatal("unable to open output directory '{s}': {s}", .{ dirname, @errorName(err) }); }; cleanup_emit_bin_dir = handle; break :b Compilation.EmitLoc{ @@ -1585,19 +1585,19 @@ fn buildOutputType( }, }; - const default_h_basename = try std.fmt.allocPrint(arena, "{}.h", .{root_name}); + const default_h_basename = try std.fmt.allocPrint(arena, "{s}.h", .{root_name}); var emit_h_resolved = try emit_h.resolve(default_h_basename); defer emit_h_resolved.deinit(); - const default_asm_basename = try std.fmt.allocPrint(arena, "{}.s", .{root_name}); + const default_asm_basename = try std.fmt.allocPrint(arena, "{s}.s", .{root_name}); var emit_asm_resolved = try emit_asm.resolve(default_asm_basename); defer emit_asm_resolved.deinit(); - const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{}.ll", .{root_name}); + const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{s}.ll", .{root_name}); var emit_llvm_ir_resolved = try emit_llvm_ir.resolve(default_llvm_ir_basename); defer emit_llvm_ir_resolved.deinit(); - const default_analysis_basename = try std.fmt.allocPrint(arena, "{}-analysis.json", .{root_name}); + const default_analysis_basename = try std.fmt.allocPrint(arena, "{s}-analysis.json", .{root_name}); var emit_analysis_resolved = try emit_analysis.resolve(default_analysis_basename); defer emit_analysis_resolved.deinit(); @@ -1609,10 +1609,10 @@ fn buildOutputType( .yes_default_path => blk: { if (root_src_file) |rsf| { if (mem.endsWith(u8, rsf, ".zir")) { - break :blk try std.fmt.allocPrint(arena, "{}.out.zir", .{root_name}); + break :blk try std.fmt.allocPrint(arena, 
"{s}.out.zir", .{root_name}); } } - break :blk try std.fmt.allocPrint(arena, "{}.zir", .{root_name}); + break :blk try std.fmt.allocPrint(arena, "{s}.zir", .{root_name}); }, .yes => |p| p, }; @@ -1642,7 +1642,7 @@ fn buildOutputType( } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { - fatal("unable to find zig installation directory: {}", .{@errorName(err)}); + fatal("unable to find zig installation directory: {s}", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); @@ -1655,7 +1655,7 @@ fn buildOutputType( if (libc_paths_file) |paths_file| { libc_installation = LibCInstallation.parse(gpa, paths_file) catch |err| { - fatal("unable to parse libc paths file: {}", .{@errorName(err)}); + fatal("unable to parse libc paths file: {s}", .{@errorName(err)}); }; } @@ -1791,7 +1791,7 @@ fn buildOutputType( .disable_lld_caching = !have_enable_cache, .subsystem = subsystem, }) catch |err| { - fatal("unable to create compilation: {}", .{@errorName(err)}); + fatal("unable to create compilation: {s}", .{@errorName(err)}); }; var comp_destroyed = false; defer if (!comp_destroyed) comp.destroy(); @@ -1914,12 +1914,12 @@ fn buildOutputType( if (!watch) return cleanExit(); } else { const cmd = try argvCmd(arena, argv.items); - fatal("the following test command failed with exit code {}:\n{}", .{ code, cmd }); + fatal("the following test command failed with exit code {}:\n{s}", .{ code, cmd }); } }, else => { const cmd = try argvCmd(arena, argv.items); - fatal("the following test command crashed:\n{}", .{cmd}); + fatal("the following test command crashed:\n{s}", .{cmd}); }, } }, @@ -1936,7 +1936,7 @@ fn buildOutputType( try stderr.print("(zig) ", .{}); try comp.makeBinFileExecutable(); if (stdin.readUntilDelimiterOrEof(&repl_buf, '\n') catch |err| { - try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)}); + try stderr.print("\nUnable to parse command: {s}\n", .{@errorName(err)}); continue; }) |line| { const actual_line = mem.trimRight(u8, line, "\r\n "); @@ -1954,7 +1954,7 @@ fn buildOutputType( } else if (mem.eql(u8, actual_line, "help")) { try stderr.writeAll(repl_help); } else { - try stderr.print("unknown command: {}\n", .{actual_line}); + try stderr.print("unknown command: {s}\n", .{actual_line}); } } else { break; @@ -2012,14 +2012,14 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi assert(comp.c_source_files.len == 1); const c_source_file = comp.c_source_files[0]; - const translated_zig_basename = try std.fmt.allocPrint(arena, "{}.zig", .{comp.bin_file.options.root_name}); + const translated_zig_basename = try std.fmt.allocPrint(arena, "{s}.zig", .{comp.bin_file.options.root_name}); var man: Cache.Manifest = comp.obtainCObjectCacheManifest(); defer if (enable_cache) man.deinit(); man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects _ = man.addFile(c_source_file.src_path, null) catch |err| { - fatal("unable to process '{}': {}", .{ c_source_file.src_path, @errorName(err) }); + fatal("unable to process '{s}': {s}", .{ c_source_file.src_path, @errorName(err) }); }; const digest = if (try man.hit()) man.final() else digest: { @@ -2034,7 +2034,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi break :blk null; const c_src_basename = fs.path.basename(c_source_file.src_path); - const dep_basename = try std.fmt.allocPrint(arena, "{}.d", .{c_src_basename}); + const dep_basename = try std.fmt.allocPrint(arena, "{s}.d", .{c_src_basename}); 
const out_dep_path = try comp.tmpFilePath(arena, dep_basename); break :blk out_dep_path; }; @@ -2069,7 +2069,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi error.ASTUnitFailure => fatal("clang API returned errors but due to a clang bug, it is not exposing the errors for zig to see. For more details: https://github.com/ziglang/zig/issues/4455", .{}), error.SemanticAnalyzeFail => { for (clang_errors) |clang_err| { - std.debug.print("{}:{}:{}: {}\n", .{ + std.debug.print("{s}:{}:{}: {s}\n", .{ if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)", clang_err.line + 1, clang_err.column + 1, @@ -2087,7 +2087,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi try man.addDepFilePost(zig_cache_tmp_dir, dep_basename); // Just to save disk space, we delete the file because it is never needed again. zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| { - warn("failed to delete '{}': {}", .{ dep_file_path, @errorName(err) }); + warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }); }; } @@ -2102,7 +2102,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi _ = try std.zig.render(comp.gpa, bos.writer(), tree); try bos.flush(); - man.writeManifest() catch |err| warn("failed to write cache manifest: {}", .{@errorName(err)}); + man.writeManifest() catch |err| warn("failed to write cache manifest: {s}", .{@errorName(err)}); break :digest digest; }; @@ -2111,7 +2111,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi const full_zig_path = try comp.local_cache_directory.join(arena, &[_][]const u8{ "o", &digest, translated_zig_basename, }); - try io.getStdOut().writer().print("{}\n", .{full_zig_path}); + try io.getStdOut().writer().print("{s}\n", .{full_zig_path}); return cleanExit(); } else { const out_zig_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest, translated_zig_basename }); @@ -2148,10 +2148,10 @@ pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void { try stdout.writeAll(usage_libc); return cleanExit(); } else { - fatal("unrecognized parameter: '{}'", .{arg}); + fatal("unrecognized parameter: '{s}'", .{arg}); } } else if (input_file != null) { - fatal("unexpected extra parameter: '{}'", .{arg}); + fatal("unexpected extra parameter: '{s}'", .{arg}); } else { input_file = arg; } @@ -2159,7 +2159,7 @@ pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void { } if (input_file) |libc_file| { var libc = LibCInstallation.parse(gpa, libc_file) catch |err| { - fatal("unable to parse libc file: {}", .{@errorName(err)}); + fatal("unable to parse libc file: {s}", .{@errorName(err)}); }; defer libc.deinit(gpa); } else { @@ -2167,7 +2167,7 @@ pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void { .allocator = gpa, .verbose = true, }) catch |err| { - fatal("unable to detect native libc: {}", .{@errorName(err)}); + fatal("unable to detect native libc: {s}", .{@errorName(err)}); }; defer libc.deinit(gpa); @@ -2205,16 +2205,16 @@ pub fn cmdInit( try io.getStdOut().writeAll(usage_init); return cleanExit(); } else { - fatal("unrecognized parameter: '{}'", .{arg}); + fatal("unrecognized parameter: '{s}'", .{arg}); } } else { - fatal("unexpected extra parameter: '{}'", .{arg}); + fatal("unexpected extra parameter: '{s}'", .{arg}); } } } const self_exe_path = try fs.selfExePathAlloc(arena); var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { - fatal("unable 
to find zig installation directory: {}\n", .{@errorName(err)}); + fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); @@ -2232,7 +2232,7 @@ pub fn cmdInit( const max_bytes = 10 * 1024 * 1024; const build_zig_contents = template_dir.readFileAlloc(arena, "build.zig", max_bytes) catch |err| { - fatal("unable to read template file 'build.zig': {}", .{@errorName(err)}); + fatal("unable to read template file 'build.zig': {s}", .{@errorName(err)}); }; var modified_build_zig_contents = std.ArrayList(u8).init(arena); try modified_build_zig_contents.ensureCapacity(build_zig_contents.len); @@ -2244,13 +2244,13 @@ pub fn cmdInit( } } const main_zig_contents = template_dir.readFileAlloc(arena, "src" ++ s ++ "main.zig", max_bytes) catch |err| { - fatal("unable to read template file 'main.zig': {}", .{@errorName(err)}); + fatal("unable to read template file 'main.zig': {s}", .{@errorName(err)}); }; if (fs.cwd().access("build.zig", .{})) |_| { fatal("existing build.zig file would be overwritten", .{}); } else |err| switch (err) { error.FileNotFound => {}, - else => fatal("unable to test existence of build.zig: {}\n", .{@errorName(err)}), + else => fatal("unable to test existence of build.zig: {s}\n", .{@errorName(err)}), } var src_dir = try fs.cwd().makeOpenPath("src", .{}); defer src_dir.close(); @@ -2311,23 +2311,23 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "--build-file")) { - if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; build_file = args[i]; continue; } else if (mem.eql(u8, arg, "--override-lib-dir")) { - if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; override_lib_dir = args[i]; try child_argv.appendSlice(&[_][]const u8{ arg, args[i] }); continue; } else if (mem.eql(u8, arg, "--cache-dir")) { - if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; override_local_cache_dir = args[i]; continue; } else if (mem.eql(u8, arg, "--global-cache-dir")) { - if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; override_global_cache_dir = args[i]; continue; @@ -2344,7 +2344,7 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { - fatal("unable to find zig installation directory: {}", .{@errorName(err)}); + fatal("unable to find zig installation directory: {s}", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); @@ -2385,7 +2385,7 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v } else |err| switch (err) { error.FileNotFound => { dirname = fs.path.dirname(dirname) orelse { - std.log.info("{}", .{ + std.log.info("{s}", .{ \\Initialize a 'build.zig' template file with `zig init-lib` or `zig init-exe`, \\or see `zig --help` for more options. 
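[Aside, not part of the patch] The hunks throughout this commit mechanically switch string arguments from "{}" to "{s}". A minimal sketch of the convention at a hypothetical call site (names invented for illustration): std.fmt's "{s}" renders a byte slice as text and "{d}" renders an integer in decimal, and error values are passed through @errorName so they can be printed as strings as well, which is exactly the pattern these hunks adopt.

const std = @import("std");

pub fn main() void {
    const file_path: []const u8 = "src/main.zig";
    const line: u32 = 42;
    const err: anyerror = error.FileNotFound;
    // "{s}" renders a byte slice as text; "{d}" renders an integer in decimal.
    // Error values are converted with @errorName and printed with "{s}" too.
    std.debug.print("{s}:{d}: {s}\n", .{ file_path, line, @errorName(err) });
}
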
}); @@ -2467,7 +2467,7 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v .self_exe_path = self_exe_path, .thread_pool = &thread_pool, }) catch |err| { - fatal("unable to create compilation: {}", .{@errorName(err)}); + fatal("unable to create compilation: {s}", .{@errorName(err)}); }; defer comp.destroy(); @@ -2493,11 +2493,11 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v .Exited => |code| { if (code == 0) return cleanExit(); const cmd = try argvCmd(arena, child_argv); - fatal("the following build command failed with exit code {}:\n{}", .{ code, cmd }); + fatal("the following build command failed with exit code {}:\n{s}", .{ code, cmd }); }, else => { const cmd = try argvCmd(arena, child_argv); - fatal("the following build command crashed:\n{}", .{cmd}); + fatal("the following build command crashed:\n{s}", .{cmd}); }, } } @@ -2564,14 +2564,14 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void { i += 1; const next_arg = args[i]; color = std.meta.stringToEnum(Color, next_arg) orelse { - fatal("expected [auto|on|off] after --color, found '{}'", .{next_arg}); + fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg}); }; } else if (mem.eql(u8, arg, "--stdin")) { stdin_flag = true; } else if (mem.eql(u8, arg, "--check")) { check_flag = true; } else { - fatal("unrecognized parameter: '{}'", .{arg}); + fatal("unrecognized parameter: '{s}'", .{arg}); } } else { try input_files.append(arg); @@ -2590,7 +2590,7 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void { defer gpa.free(source_code); const tree = std.zig.parse(gpa, source_code) catch |err| { - fatal("error parsing stdin: {}", .{err}); + fatal("error parsing stdin: {s}", .{err}); }; defer tree.deinit(); @@ -2629,7 +2629,7 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void { for (input_files.items) |file_path| { // Get the real path here to avoid Windows failing on relative file paths with . or .. in them. 
const real_path = fs.realpathAlloc(gpa, file_path) catch |err| { - fatal("unable to open '{}': {}", .{ file_path, err }); + fatal("unable to open '{s}': {s}", .{ file_path, @errorName(err) }); }; defer gpa.free(real_path); @@ -2668,7 +2668,7 @@ fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_ fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) { error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path), else => { - warn("unable to format '{}': {}", .{ file_path, err }); + warn("unable to format '{s}': {s}", .{ file_path, @errorName(err) }); fmt.any_error = true; return; }, @@ -2702,7 +2702,7 @@ fn fmtPathDir( try fmtPathDir(fmt, full_path, check_mode, dir, entry.name); } else { fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| { - warn("unable to format '{}': {}", .{ full_path, err }); + warn("unable to format '{s}': {s}", .{ full_path, @errorName(err) }); fmt.any_error = true; return; }; @@ -2761,7 +2761,7 @@ fn fmtPathFile( const anything_changed = try std.zig.render(fmt.gpa, io.null_out_stream, tree); if (anything_changed) { const stdout = io.getStdOut().writer(); - try stdout.print("{}\n", .{file_path}); + try stdout.print("{s}\n", .{file_path}); fmt.any_error = true; } } else { @@ -2779,7 +2779,7 @@ fn fmtPathFile( try af.file.writeAll(fmt.out_buffer.items); try af.finish(); const stdout = io.getStdOut().writer(); - try stdout.print("{}\n", .{file_path}); + try stdout.print("{s}\n", .{file_path}); } } @@ -2812,7 +2812,7 @@ fn printErrMsgToFile( const text = text_buf.items; const stream = file.outStream(); - try stream.print("{}:{}:{}: error: {}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text }); + try stream.print("{s}:{}:{}: error: {s}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text }); if (!color_on) return; @@ -2984,7 +2984,7 @@ pub const ClangArgIterator = struct { const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit const resp_file_path = arg[1..]; const resp_contents = fs.cwd().readFileAlloc(allocator, resp_file_path, max_bytes) catch |err| { - fatal("unable to read response file '{}': {}", .{ resp_file_path, @errorName(err) }); + fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) }); }; defer allocator.free(resp_contents); // TODO is there a specification for this file format? 
Let's find it and make this parsing more robust @@ -3057,7 +3057,7 @@ pub const ClangArgIterator = struct { const prefix_len = clang_arg.matchStartsWith(arg); if (prefix_len == arg.len) { if (self.next_index >= self.argv.len) { - fatal("Expected parameter after '{}'", .{arg}); + fatal("Expected parameter after '{s}'", .{arg}); } self.only_arg = self.argv[self.next_index]; self.incrementArgIndex(); @@ -3078,7 +3078,7 @@ pub const ClangArgIterator = struct { if (prefix_len != 0) { self.only_arg = arg[prefix_len..]; if (self.next_index >= self.argv.len) { - fatal("Expected parameter after '{}'", .{arg}); + fatal("Expected parameter after '{s}'", .{arg}); } self.second_arg = self.argv[self.next_index]; self.incrementArgIndex(); @@ -3089,7 +3089,7 @@ pub const ClangArgIterator = struct { }, .separate => if (clang_arg.matchEql(arg) > 0) { if (self.next_index >= self.argv.len) { - fatal("Expected parameter after '{}'", .{arg}); + fatal("Expected parameter after '{s}'", .{arg}); } self.only_arg = self.argv[self.next_index]; self.incrementArgIndex(); @@ -3115,7 +3115,7 @@ pub const ClangArgIterator = struct { }, } else { - fatal("Unknown Clang option: '{}'", .{arg}); + fatal("Unknown Clang option: '{s}'", .{arg}); } } @@ -3143,7 +3143,7 @@ pub const ClangArgIterator = struct { fn parseCodeModel(arg: []const u8) std.builtin.CodeModel { return std.meta.stringToEnum(std.builtin.CodeModel, arg) orelse - fatal("unsupported machine code model: '{}'", .{arg}); + fatal("unsupported machine code model: '{s}'", .{arg}); } /// Raise the open file descriptor limit. Ask and ye shall receive. @@ -3263,7 +3263,7 @@ fn detectNativeTargetInfo(gpa: *Allocator, cross_target: std.zig.CrossTarget) !s // CPU model & feature detection is todo so here we rely on LLVM. // https://github.com/ziglang/zig/issues/4591 if (!build_options.have_llvm) - fatal("CPU features detection is not yet available for {} without LLVM extensions", .{@tagName(arch)}); + fatal("CPU features detection is not yet available for {s} without LLVM extensions", .{@tagName(arch)}); const llvm = @import("llvm_bindings.zig"); const llvm_cpu_name = llvm.GetHostCPUName(); diff --git a/src/mingw.zig b/src/mingw.zig index 246b0f33dc..d55cc28b2b 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -381,7 +381,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const term = child.wait() catch |err| { // TODO surface a proper error here - log.err("unable to spawn {}: {}", .{ args[0], @errorName(err) }); + log.err("unable to spawn {s}: {s}", .{ args[0], @errorName(err) }); return error.ClangPreprocessorFailed; }; @@ -395,7 +395,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { }, else => { // TODO surface a proper error here - log.err("clang terminated unexpectedly with stderr: {}", .{stderr}); + log.err("clang terminated unexpectedly with stderr: {s}", .{stderr}); return error.ClangPreprocessorFailed; }, } diff --git a/src/musl.zig b/src/musl.zig index f67fe90add..a865e78623 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -155,21 +155,21 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { if (!is_arch_specific) { // Look for an arch specific override. 
override_path.shrinkRetainingCapacity(0); - try override_path.writer().print("{}" ++ s ++ "{}" ++ s ++ "{}.s", .{ + try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{ dirname, arch_name, noextbasename, }); if (source_table.contains(override_path.items)) continue; override_path.shrinkRetainingCapacity(0); - try override_path.writer().print("{}" ++ s ++ "{}" ++ s ++ "{}.S", .{ + try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{ dirname, arch_name, noextbasename, }); if (source_table.contains(override_path.items)) continue; override_path.shrinkRetainingCapacity(0); - try override_path.writer().print("{}" ++ s ++ "{}" ++ s ++ "{}.c", .{ + try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{ dirname, arch_name, noextbasename, }); if (source_table.contains(override_path.items)) @@ -322,7 +322,7 @@ fn add_cc_args( const target = comp.getTarget(); const arch_name = target_util.archMuslName(target.cpu.arch); const os_name = @tagName(target.os.tag); - const triple = try std.fmt.allocPrint(arena, "{}-{}-musl", .{ arch_name, os_name }); + const triple = try std.fmt.allocPrint(arena, "{s}-{s}-musl", .{ arch_name, os_name }); const o_arg = if (want_O3) "-O3" else "-Os"; try args.appendSlice(&[_][]const u8{ diff --git a/src/print_env.zig b/src/print_env.zig index 8aa692e644..bcf4a983ab 100644 --- a/src/print_env.zig +++ b/src/print_env.zig @@ -9,7 +9,7 @@ pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: std.fs.File.Wri defer gpa.free(self_exe_path); var zig_lib_directory = introspect.findZigLibDirFromSelfExe(gpa, self_exe_path) catch |err| { - fatal("unable to find zig installation directory: {}\n", .{@errorName(err)}); + fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)}); }; defer gpa.free(zig_lib_directory.path.?); defer zig_lib_directory.handle.close(); diff --git a/src/print_targets.zig b/src/print_targets.zig index 724cb7a9ac..cf55eee516 100644 --- a/src/print_targets.zig +++ b/src/print_targets.zig @@ -18,7 +18,7 @@ pub fn cmdTargets( native_target: Target, ) !void { var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| { - fatal("unable to find zig installation directory: {}\n", .{@errorName(err)}); + fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); defer allocator.free(zig_lib_directory.path.?); @@ -61,7 +61,7 @@ pub fn cmdTargets( try jws.objectField("libc"); try jws.beginArray(); for (target.available_libcs) |libc| { - const tmp = try std.fmt.allocPrint(allocator, "{}-{}-{}", .{ + const tmp = try std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(libc.arch), @tagName(libc.os), @tagName(libc.abi), }); defer allocator.free(tmp); diff --git a/src/stage1.zig b/src/stage1.zig index 1d50a71ad1..cf3a252ce8 100644 --- a/src/stage1.zig +++ b/src/stage1.zig @@ -37,14 +37,14 @@ pub export fn main(argc: c_int, argv: [*][*:0]u8) c_int { defer arena_instance.deinit(); const arena = &arena_instance.allocator; - const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{}", .{"OutOfMemory"}); + const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{s}", .{"OutOfMemory"}); for (args) |*arg, i| { arg.* = mem.spanZ(argv[i]); } if (std.builtin.mode == .Debug) { stage2.mainArgs(gpa, arena, args) catch unreachable; } else { - stage2.mainArgs(gpa, arena, args) catch |err| fatal("{}", .{@errorName(err)}); + stage2.mainArgs(gpa, arena, args) catch |err| fatal("{s}", 
.{@errorName(err)}); } return 0; } diff --git a/src/translate_c.zig b/src/translate_c.zig index 1b2aa4b219..c609597770 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -136,7 +136,7 @@ const Scope = struct { var proposed_name = name_copy; while (scope.contains(proposed_name)) { scope.mangle_count += 1; - proposed_name = try std.fmt.allocPrint(c.arena, "{}_{}", .{ name, scope.mangle_count }); + proposed_name = try std.fmt.allocPrint(c.arena, "{s}_{}", .{ name, scope.mangle_count }); } try scope.variables.append(.{ .name = name_copy, .alias = proposed_name }); return proposed_name; @@ -290,7 +290,7 @@ pub const Context = struct { const line = c.source_manager.getSpellingLineNumber(spelling_loc); const column = c.source_manager.getSpellingColumnNumber(spelling_loc); - return std.fmt.allocPrint(c.arena, "{}:{}:{}", .{ filename, line, column }); + return std.fmt.allocPrint(c.arena, "{s}:{d}:{d}", .{ filename, line, column }); } fn createCall(c: *Context, fn_expr: *ast.Node, params_len: ast.NodeIndex) !*ast.Node.Call { @@ -530,7 +530,7 @@ fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void { }, else => { const decl_name = try c.str(decl.getDeclKindName()); - try emitWarning(c, decl.getLocation(), "ignoring {} declaration", .{decl_name}); + try emitWarning(c, decl.getLocation(), "ignoring {s} declaration", .{decl_name}); }, } } @@ -625,7 +625,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { const param_name = if (param.name_token) |name_tok| tokenSlice(c, name_tok) else - return failDecl(c, fn_decl_loc, fn_name, "function {} parameter has no name", .{fn_name}); + return failDecl(c, fn_decl_loc, fn_name, "function {s} parameter has no name", .{fn_name}); const c_param = fn_decl.getParamDecl(param_id); const qual_type = c_param.getOriginalType(); @@ -634,7 +634,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { const mangled_param_name = try block_scope.makeMangledName(c, param_name); if (!is_const) { - const bare_arg_name = try std.fmt.allocPrint(c.arena, "arg_{}", .{mangled_param_name}); + const bare_arg_name = try std.fmt.allocPrint(c.arena, "arg_{s}", .{mangled_param_name}); const arg_name = try block_scope.makeMangledName(c, bare_arg_name); const mut_tok = try appendToken(c, .Keyword_var, "var"); @@ -727,7 +727,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co // TODO https://github.com/ziglang/zig/issues/3756 // TODO https://github.com/ziglang/zig/issues/1802 - const checked_name = if (isZigPrimitiveType(var_name)) try std.fmt.allocPrint(c.arena, "{}_{}", .{ var_name, c.getMangle() }) else var_name; + const checked_name = if (isZigPrimitiveType(var_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ var_name, c.getMangle() }) else var_name; const var_decl_loc = var_decl.getLocation(); const qual_type = var_decl.getTypeSourceInfo_getType(); @@ -808,7 +808,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co _ = try appendToken(rp.c, .LParen, "("); const expr = try transCreateNodeStringLiteral( rp.c, - try std.fmt.allocPrint(rp.c.arena, "\"{}\"", .{str_ptr[0..str_len]}), + try std.fmt.allocPrint(rp.c.arena, "\"{s}\"", .{str_ptr[0..str_len]}), ); _ = try appendToken(rp.c, .RParen, ")"); @@ -887,7 +887,7 @@ fn transTypeDef(c: *Context, typedef_decl: *const clang.TypedefNameDecl, top_lev // TODO https://github.com/ziglang/zig/issues/3756 // TODO https://github.com/ziglang/zig/issues/1802 - const checked_name = if 
(isZigPrimitiveType(typedef_name)) try std.fmt.allocPrint(c.arena, "{}_{}", .{ typedef_name, c.getMangle() }) else typedef_name; + const checked_name = if (isZigPrimitiveType(typedef_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ typedef_name, c.getMangle() }) else typedef_name; if (checkForBuiltinTypedef(checked_name)) |builtin| { return transTypeDefAsBuiltin(c, typedef_decl, builtin); } @@ -958,11 +958,11 @@ fn transRecordDecl(c: *Context, record_decl: *const clang.RecordDecl) Error!?*as container_kind_name = "struct"; container_kind = .Keyword_struct; } else { - try emitWarning(c, record_loc, "record {} is not a struct or union", .{bare_name}); + try emitWarning(c, record_loc, "record {s} is not a struct or union", .{bare_name}); return null; } - const name = try std.fmt.allocPrint(c.arena, "{}_{}", .{ container_kind_name, bare_name }); + const name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ container_kind_name, bare_name }); _ = try c.decl_table.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), name); const visib_tok = if (!is_unnamed) try appendToken(c, .Keyword_pub, "pub") else null; @@ -1003,7 +1003,7 @@ fn transRecordDecl(c: *Context, record_decl: *const clang.RecordDecl) Error!?*as _ = try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {}); const opaque_type = try transCreateNodeOpaqueType(c); semicolon = try appendToken(c, .Semicolon, ";"); - try emitWarning(c, field_loc, "{} demoted to opaque type - has bitfield", .{container_kind_name}); + try emitWarning(c, field_loc, "{s} demoted to opaque type - has bitfield", .{container_kind_name}); break :blk opaque_type; } @@ -1011,7 +1011,7 @@ fn transRecordDecl(c: *Context, record_decl: *const clang.RecordDecl) Error!?*as _ = try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {}); const opaque_type = try transCreateNodeOpaqueType(c); semicolon = try appendToken(c, .Semicolon, ";"); - try emitWarning(c, field_loc, "{} demoted to opaque type - has variable length array", .{container_kind_name}); + try emitWarning(c, field_loc, "{s} demoted to opaque type - has variable length array", .{container_kind_name}); break :blk opaque_type; } @@ -1030,7 +1030,7 @@ fn transRecordDecl(c: *Context, record_decl: *const clang.RecordDecl) Error!?*as _ = try c.opaque_demotes.put(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), {}); const opaque_type = try transCreateNodeOpaqueType(c); semicolon = try appendToken(c, .Semicolon, ";"); - try emitWarning(c, record_loc, "{} demoted to opaque type - unable to translate type of field {}", .{ container_kind_name, raw_name }); + try emitWarning(c, record_loc, "{s} demoted to opaque type - unable to translate type of field {s}", .{ container_kind_name, raw_name }); break :blk opaque_type; }, else => |e| return e, @@ -1114,7 +1114,7 @@ fn transEnumDecl(c: *Context, enum_decl: *const clang.EnumDecl) Error!?*ast.Node is_unnamed = true; } - const name = try std.fmt.allocPrint(c.arena, "enum_{}", .{bare_name}); + const name = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name}); _ = try c.decl_table.put(c.gpa, @ptrToInt(enum_decl.getCanonicalDecl()), name); const visib_tok = if (!is_unnamed) try appendToken(c, .Keyword_pub, "pub") else null; @@ -1385,7 +1385,7 @@ fn transStmt( rp, error.UnsupportedTranslation, stmt.getBeginLoc(), - "TODO implement translation of stmt class {}", + "TODO implement translation of stmt class {s}", .{@tagName(sc)}, ); }, @@ -1684,7 +1684,7 @@ fn transDeclStmtOne( rp, error.UnsupportedTranslation, decl.getLocation(), - 
"TODO implement translation of DeclStmt kind {}", + "TODO implement translation of DeclStmt kind {s}", .{@tagName(kind)}, ), } @@ -1782,7 +1782,7 @@ fn transImplicitCastExpr( rp, error.UnsupportedTranslation, @ptrCast(*const clang.Stmt, expr).getBeginLoc(), - "TODO implement translation of CastKind {}", + "TODO implement translation of CastKind {s}", .{@tagName(kind)}, ), } @@ -2043,7 +2043,7 @@ fn transStringLiteral( rp, error.UnsupportedTranslation, @ptrCast(*const clang.Stmt, stmt).getBeginLoc(), - "TODO: support string literal kind {}", + "TODO: support string literal kind {s}", .{kind}, ), } @@ -2168,7 +2168,6 @@ fn transCCast( // @boolToInt returns either a comptime_int or a u1 // TODO: if dst_type is 1 bit & signed (bitfield) we need @bitCast // instead of @as - const builtin_node = try rp.c.createBuiltinCall("@boolToInt", 1); builtin_node.params()[0] = expr; builtin_node.rparen_token = try appendToken(rp.c, .RParen, ")"); @@ -2455,7 +2454,7 @@ fn transInitListExpr( ); } else { const type_name = rp.c.str(qual_type.getTypeClassName()); - return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported initlist type: '{}'", .{type_name}); + return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported initlist type: '{s}'", .{type_name}); } } @@ -4433,7 +4432,8 @@ fn transCreateNodeBoolLiteral(c: *Context, value: bool) !*ast.Node { } fn transCreateNodeInt(c: *Context, int: anytype) !*ast.Node { - const token = try appendTokenFmt(c, .IntegerLiteral, "{}", .{int}); + const fmt_s = if (comptime std.meta.trait.isIntegerNumber(@TypeOf(int))) "{d}" else "{s}"; + const token = try appendTokenFmt(c, .IntegerLiteral, fmt_s, .{int}); const node = try c.arena.create(ast.Node.OneToken); node.* = .{ .base = .{ .tag = .IntegerLiteral }, @@ -4442,8 +4442,8 @@ fn transCreateNodeInt(c: *Context, int: anytype) !*ast.Node { return &node.base; } -fn transCreateNodeFloat(c: *Context, int: anytype) !*ast.Node { - const token = try appendTokenFmt(c, .FloatLiteral, "{}", .{int}); +fn transCreateNodeFloat(c: *Context, str: []const u8) !*ast.Node { + const token = try appendTokenFmt(c, .FloatLiteral, "{s}", .{str}); const node = try c.arena.create(ast.Node.OneToken); node.* = .{ .base = .{ .tag = .FloatLiteral }, @@ -4916,7 +4916,7 @@ fn transType(rp: RestorePoint, ty: *const clang.Type, source_loc: clang.SourceLo }, else => { const type_name = rp.c.str(ty.getTypeClassName()); - return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported type: '{}'", .{type_name}); + return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported type: '{s}'", .{type_name}); }, } } @@ -4999,7 +4999,7 @@ fn transCC( rp, error.UnsupportedType, source_loc, - "unsupported calling convention: {}", + "unsupported calling convention: {s}", .{@tagName(clang_cc)}, ), } @@ -5117,7 +5117,7 @@ fn finishTransFnProto( _ = try appendToken(rp.c, .LParen, "("); const expr = try transCreateNodeStringLiteral( rp.c, - try std.fmt.allocPrint(rp.c.arena, "\"{}\"", .{str_ptr[0..str_len]}), + try std.fmt.allocPrint(rp.c.arena, "\"{s}\"", .{str_ptr[0..str_len]}), ); _ = try appendToken(rp.c, .RParen, ")"); @@ -5214,7 +5214,7 @@ fn revertAndWarn( fn emitWarning(c: *Context, loc: clang.SourceLocation, comptime format: []const u8, args: anytype) !void { const args_prefix = .{c.locStr(loc)}; - _ = try appendTokenFmt(c, .LineComment, "// {}: warning: " ++ format, args_prefix ++ args); + _ = try appendTokenFmt(c, .LineComment, "// {s}: warning: " ++ format, args_prefix ++ args); } pub fn failDecl(c: *Context, loc: 
clang.SourceLocation, name: []const u8, comptime format: []const u8, args: anytype) !void { @@ -5228,7 +5228,7 @@ pub fn failDecl(c: *Context, loc: clang.SourceLocation, name: []const u8, compti const msg_tok = try appendTokenFmt(c, .StringLiteral, "\"" ++ format ++ "\"", args); const rparen_tok = try appendToken(c, .RParen, ")"); const semi_tok = try appendToken(c, .Semicolon, ";"); - _ = try appendTokenFmt(c, .LineComment, "// {}", .{c.locStr(loc)}); + _ = try appendTokenFmt(c, .LineComment, "// {s}", .{c.locStr(loc)}); const msg_node = try c.arena.create(ast.Node.OneToken); msg_node.* = .{ @@ -5258,7 +5258,7 @@ pub fn failDecl(c: *Context, loc: clang.SourceLocation, name: []const u8, compti fn appendToken(c: *Context, token_id: Token.Id, bytes: []const u8) !ast.TokenIndex { std.debug.assert(token_id != .Identifier); // use appendIdentifier - return appendTokenFmt(c, token_id, "{}", .{bytes}); + return appendTokenFmt(c, token_id, "{s}", .{bytes}); } fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: anytype) !ast.TokenIndex { @@ -5329,7 +5329,7 @@ fn transCreateNodeIdentifier(c: *Context, name: []const u8) !*ast.Node { } fn transCreateNodeIdentifierUnchecked(c: *Context, name: []const u8) !*ast.Node { - const token_index = try appendTokenFmt(c, .Identifier, "{}", .{name}); + const token_index = try appendTokenFmt(c, .Identifier, "{s}", .{name}); const identifier = try c.arena.create(ast.Node.OneToken); identifier.* = .{ .base = .{ .tag = .Identifier }, @@ -5390,7 +5390,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void { const name = try c.str(raw_name); // TODO https://github.com/ziglang/zig/issues/3756 // TODO https://github.com/ziglang/zig/issues/1802 - const mangled_name = if (isZigPrimitiveType(name)) try std.fmt.allocPrint(c.arena, "{}_{}", .{ name, c.getMangle() }) else name; + const mangled_name = if (isZigPrimitiveType(name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ name, c.getMangle() }) else name; if (scope.containsNow(mangled_name)) { continue; } @@ -5468,7 +5468,7 @@ fn transMacroDefine(c: *Context, m: *MacroCtx) ParseError!void { const init_node = try parseCExpr(c, m, scope); const last = m.next().?; if (last != .Eof and last != .Nl) - return m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(last)}); + return m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(last)}); const semicolon_token = try appendToken(c, .Semicolon, ";"); const node = try ast.Node.VarDecl.create(c.arena, .{ @@ -5540,7 +5540,7 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void { const expr = try parseCExpr(c, m, scope); const last = m.next().?; if (last != .Eof and last != .Nl) - return m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(last)}); + return m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(last)}); _ = try appendToken(c, .Semicolon, ";"); const type_of_arg = if (!expr.tag.isBlock()) expr else blk: { const stmts = expr.blockStatements(); @@ -5623,11 +5623,11 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!*ast.Node { switch (lit_bytes[1]) { '0'...'7' => { // Octal - lit_bytes = try std.fmt.allocPrint(c.arena, "0o{}", .{lit_bytes}); + lit_bytes = try std.fmt.allocPrint(c.arena, "0o{s}", .{lit_bytes}); }, 'X' => { // Hexadecimal with capital X, valid in C but not in Zig - lit_bytes = try std.fmt.allocPrint(c.arena, "0x{}", .{lit_bytes[2..]}); + lit_bytes = try std.fmt.allocPrint(c.arena, "0x{s}", 
.{lit_bytes[2..]}); }, else => {}, } @@ -5659,7 +5659,7 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!*ast.Node { }, .FloatLiteral => |suffix| { if (lit_bytes[0] == '.') - lit_bytes = try std.fmt.allocPrint(c.arena, "0{}", .{lit_bytes}); + lit_bytes = try std.fmt.allocPrint(c.arena, "0{s}", .{lit_bytes}); if (suffix == .none) { return transCreateNodeFloat(c, lit_bytes); } @@ -5937,7 +5937,7 @@ fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!* const next_id = m.next().?; if (next_id != .RParen) { - try m.fail(c, "unable to translate C expr: expected ')' instead got: {}", .{@tagName(next_id)}); + try m.fail(c, "unable to translate C expr: expected ')' instead got: {s}", .{@tagName(next_id)}); return error.ParseError; } var saw_l_paren = false; @@ -5995,7 +5995,7 @@ fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!* return &group_node.base; }, else => { - try m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(tok)}); + try m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(tok)}); return error.ParseError; }, } diff --git a/src/value.zig b/src/value.zig index 8180df2f9e..10f58fa44f 100644 --- a/src/value.zig +++ b/src/value.zig @@ -464,7 +464,7 @@ pub const Value = extern union { .ty => return val.castTag(.ty).?.data.format("", options, out_stream), .int_type => { const int_type = val.castTag(.int_type).?.data; - return out_stream.print("{}{}", .{ + return out_stream.print("{s}{d}", .{ if (int_type.signed) "s" else "u", int_type.bits, }); @@ -507,7 +507,7 @@ pub const Value = extern union { } return out_stream.writeAll("}"); }, - .@"error" => return out_stream.print("error.{}", .{val.castTag(.@"error").?.data.name}), + .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}), .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), }; } diff --git a/src/zir.zig b/src/zir.zig index ce8498d7e8..94e2b24b0c 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -1150,7 +1150,7 @@ pub const Module = struct { for (self.decls) |decl, i| { write.next_instr_index = 0; - try stream.print("@{} ", .{decl.name}); + try stream.print("@{s} ", .{decl.name}); try write.writeInstToStream(stream, decl.inst); try stream.writeByte('\n'); } @@ -1206,13 +1206,13 @@ const Writer = struct { if (@typeInfo(arg_field.field_type) == .Optional) { if (@field(inst.kw_args, arg_field.name)) |non_optional| { if (need_comma) try stream.writeAll(", "); - try stream.print("{}=", .{arg_field.name}); + try stream.print("{s}=", .{arg_field.name}); try self.writeParamToStream(stream, &non_optional); need_comma = true; } } else { if (need_comma) try stream.writeAll(", "); - try stream.print("{}=", .{arg_field.name}); + try stream.print("{s}=", .{arg_field.name}); try self.writeParamToStream(stream, &@field(inst.kw_args, arg_field.name)); need_comma = true; } @@ -1334,16 +1334,16 @@ const Writer = struct { if (info.index) |i| { try stream.print("%{}", .{info.index}); } else { - try stream.print("@{}", .{info.name}); + try stream.print("@{s}", .{info.name}); } } else if (inst.cast(Inst.DeclVal)) |decl_val| { - try stream.print("@{}", .{decl_val.positionals.name}); + try stream.print("@{s}", .{decl_val.positionals.name}); } else if (inst.cast(Inst.DeclValInModule)) |decl_val| { - try stream.print("@{}", .{decl_val.positionals.decl.name}); + try stream.print("@{s}", .{decl_val.positionals.decl.name}); } else { // This should be unreachable in theory, but since ZIR is used for 
debugging the compiler // we output some debug text instead. - try stream.print("?{}?", .{@tagName(inst.tag)}); + try stream.print("?{s}?", .{@tagName(inst.tag)}); } } }; @@ -1424,7 +1424,7 @@ const Parser = struct { const decl = try parseInstruction(self, &body_context, ident); const ident_index = body_context.instructions.items.len; if (try body_context.name_map.fetchPut(ident, decl.inst)) |_| { - return self.fail("redefinition of identifier '{}'", .{ident}); + return self.fail("redefinition of identifier '{s}'", .{ident}); } try body_context.instructions.append(decl.inst); continue; @@ -1510,7 +1510,7 @@ const Parser = struct { const decl = try parseInstruction(self, null, ident); const ident_index = self.decls.items.len; if (try self.global_name_map.fetchPut(ident, decl.inst)) |_| { - return self.fail("redefinition of identifier '{}'", .{ident}); + return self.fail("redefinition of identifier '{s}'", .{ident}); } try self.decls.append(self.allocator, decl); }, @@ -1538,7 +1538,7 @@ const Parser = struct { for (bytes) |byte| { if (self.source[self.i] != byte) { self.i = start; - return self.fail("expected '{}'", .{bytes}); + return self.fail("expected '{s}'", .{bytes}); } self.i += 1; } @@ -1585,7 +1585,7 @@ const Parser = struct { return parseInstructionGeneric(self, field.name, tag.Type(), tag, body_ctx, name, contents_start); } } - return self.fail("unknown instruction '{}'", .{fn_name}); + return self.fail("unknown instruction '{s}'", .{fn_name}); } fn parseInstructionGeneric( @@ -1621,7 +1621,7 @@ const Parser = struct { self.i += 1; skipSpace(self); } else if (self.source[self.i] == ')') { - return self.fail("expected positional parameter '{}'", .{arg_field.name}); + return self.fail("expected positional parameter '{s}'", .{arg_field.name}); } @field(inst_specific.positionals, arg_field.name) = try parseParameterGeneric( self, @@ -1648,7 +1648,7 @@ const Parser = struct { break; } } else { - return self.fail("unrecognized keyword parameter: '{}'", .{name}); + return self.fail("unrecognized keyword parameter: '{s}'", .{name}); } skipSpace(self); } @@ -1672,7 +1672,7 @@ const Parser = struct { ' ', '\n', ',', ')' => { const enum_name = self.source[start..self.i]; return std.meta.stringToEnum(T, enum_name) orelse { - return self.fail("tag '{}' not a member of enum '{}'", .{ enum_name, @typeName(T) }); + return self.fail("tag '{s}' not a member of enum '{s}'", .{ enum_name, @typeName(T) }); }; }, 0 => return self.failByte(0), @@ -1710,7 +1710,7 @@ const Parser = struct { BigIntConst => return self.parseIntegerLiteral(), usize => { const big_int = try self.parseIntegerLiteral(); - return big_int.to(usize) catch |err| return self.fail("integer literal: {}", .{@errorName(err)}); + return big_int.to(usize) catch |err| return self.fail("integer literal: {s}", .{@errorName(err)}); }, TypedValue => return self.fail("'const' is a special instruction; not legal in ZIR text", .{}), *IrModule.Decl => return self.fail("'declval_in_module' is a special instruction; not legal in ZIR text", .{}), @@ -1759,7 +1759,7 @@ const Parser = struct { }, else => @compileError("Unimplemented: ir parseParameterGeneric for type " ++ @typeName(T)), } - return self.fail("TODO parse parameter {}", .{@typeName(T)}); + return self.fail("TODO parse parameter {s}", .{@typeName(T)}); } fn parseParameterInst(self: *Parser, body_ctx: ?*Body) !*Inst { @@ -1788,7 +1788,7 @@ const Parser = struct { const src = name_start - 1; if (local_ref) { self.i = src; - return self.fail("unrecognized identifier: {}", .{bad_name}); + 
return self.fail("unrecognized identifier: {s}", .{bad_name}); } else { const declval = try self.arena.allocator.create(Inst.DeclVal); declval.* = .{ @@ -1873,7 +1873,7 @@ pub fn dumpFn(old_module: IrModule, module_fn: *IrModule.Fn) void { const fn_ty = module_fn.owner_decl.typed_value.most_recent.typed_value.ty; _ = ctx.emitFn(module_fn, 0, fn_ty) catch |err| { - std.debug.print("unable to dump function: {}\n", .{err}); + std.debug.print("unable to dump function: {s}\n", .{@errorName(err)}); return; }; var module = Module{ @@ -2203,7 +2203,7 @@ const EmitZIR = struct { }; return self.emitStringLiteral(src, bytes); }, - else => |t| std.debug.panic("TODO implement emitTypedValue for pointer to {}", .{@tagName(t)}), + else => |t| std.debug.panic("TODO implement emitTypedValue for pointer to {s}", .{@tagName(t)}), } }, .ComptimeInt => return self.emitComptimeIntVal(src, typed_value.val), @@ -2274,7 +2274,7 @@ const EmitZIR = struct { }; return self.emitUnnamedDecl(&inst.base); }, - else => |t| std.debug.panic("TODO implement emitTypedValue for {}", .{@tagName(t)}), + else => |t| std.debug.panic("TODO implement emitTypedValue for {s}", .{@tagName(t)}), } } @@ -2947,7 +2947,7 @@ pub fn dumpZir(allocator: *Allocator, kind: []const u8, decl_name: [*:0]const u8 try write.inst_table.ensureCapacity(@intCast(u32, instructions.len)); const stderr = std.io.getStdErr().outStream(); - try stderr.print("{} {s} {{ // unanalyzed\n", .{ kind, decl_name }); + try stderr.print("{s} {s} {{ // unanalyzed\n", .{ kind, decl_name }); for (instructions) |inst| { const my_i = write.next_instr_index; @@ -2967,5 +2967,5 @@ pub fn dumpZir(allocator: *Allocator, kind: []const u8, decl_name: [*:0]const u8 try stderr.writeByte('\n'); } - try stderr.print("}} // {} {s}\n\n", .{ kind, decl_name }); + try stderr.print("}} // {s} {s}\n\n", .{ kind, decl_name }); } diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 629dfa0c9a..72a1b04238 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -274,7 +274,7 @@ pub fn resolveInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! 
const entry = if (old_inst.cast(zir.Inst.DeclVal)) |declval| blk: { const decl_name = declval.positionals.name; const entry = zir_module.contents.module.findDecl(decl_name) orelse - return mod.fail(scope, old_inst.src, "decl '{}' not found", .{decl_name}); + return mod.fail(scope, old_inst.src, "decl '{s}' not found", .{decl_name}); break :blk entry; } else blk: { // If this assert trips, the instruction that was referenced did not get @@ -564,14 +564,14 @@ fn analyzeInstStr(mod: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerErr fn analyzeInstExport(mod: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst { const symbol_name = try resolveConstString(mod, scope, export_inst.positionals.symbol_name); const exported_decl = mod.lookupDeclName(scope, export_inst.positionals.decl_name) orelse - return mod.fail(scope, export_inst.base.src, "decl '{}' not found", .{export_inst.positionals.decl_name}); + return mod.fail(scope, export_inst.base.src, "decl '{s}' not found", .{export_inst.positionals.decl_name}); try mod.analyzeExport(scope, export_inst.base.src, symbol_name, exported_decl); return mod.constVoid(scope, export_inst.base.src); } fn analyzeInstCompileError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const msg = try resolveConstString(mod, scope, inst.positionals.operand); - return mod.fail(scope, inst.base.src, "{}", .{msg}); + return mod.fail(scope, inst.base.src, "{s}", .{msg}); } fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*Inst { @@ -918,7 +918,7 @@ fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) In for (inst.positionals.fields) |field_name| { const entry = try mod.getErrorValue(field_name); if (payload.data.fields.fetchPutAssumeCapacity(entry.key, entry.value)) |prev| { - return mod.fail(scope, inst.base.src, "duplicate error: '{}'", .{field_name}); + return mod.fail(scope, inst.base.src, "duplicate error: '{s}'", .{field_name}); } } // TODO create name in format "error:line:column" @@ -1068,7 +1068,7 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr return mod.fail( scope, fieldptr.positionals.field_name.src, - "no member named '{}' in '{}'", + "no member named '{s}' in '{}'", .{ field_name, elem_ty }, ); } @@ -1089,7 +1089,7 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr return mod.fail( scope, fieldptr.positionals.field_name.src, - "no member named '{}' in '{}'", + "no member named '{s}' in '{}'", .{ field_name, elem_ty }, ); } @@ -1107,7 +1107,7 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr // TODO resolve inferred error sets const entry = if (val.castTag(.error_set)) |payload| (payload.data.fields.getEntry(field_name) orelse - return mod.fail(scope, fieldptr.base.src, "no error named '{}' in '{}'", .{ field_name, child_type })).* + return mod.fail(scope, fieldptr.base.src, "no error named '{s}' in '{}'", .{ field_name, child_type })).* else try mod.getErrorValue(field_name); @@ -1135,9 +1135,9 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr } if (&container_scope.file_scope.base == mod.root_scope) { - return mod.fail(scope, fieldptr.base.src, "root source file has no member called '{}'", .{field_name}); + return mod.fail(scope, fieldptr.base.src, "root source file has no member called '{s}'", .{field_name}); } else { - return mod.fail(scope, fieldptr.base.src, "container '{}' has no member called '{}'", .{ child_type, 
field_name }); + return mod.fail(scope, fieldptr.base.src, "container '{}' has no member called '{s}'", .{ child_type, field_name }); } }, else => return mod.fail(scope, fieldptr.base.src, "type '{}' does not support field access", .{child_type}), @@ -1503,14 +1503,14 @@ fn analyzeInstImport(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerErr const file_scope = mod.analyzeImport(scope, inst.base.src, operand) catch |err| switch (err) { error.ImportOutsidePkgPath => { - return mod.fail(scope, inst.base.src, "import of file outside package path: '{}'", .{operand}); + return mod.fail(scope, inst.base.src, "import of file outside package path: '{s}'", .{operand}); }, error.FileNotFound => { - return mod.fail(scope, inst.base.src, "unable to find '{}'", .{operand}); + return mod.fail(scope, inst.base.src, "unable to find '{s}'", .{operand}); }, else => { // TODO user friendly error to string - return mod.fail(scope, inst.base.src, "unable to open '{}': {}", .{ operand, @errorName(err) }); + return mod.fail(scope, inst.base.src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; return mod.constType(scope, inst.base.src, file_scope.root_container.ty); @@ -1637,7 +1637,7 @@ fn analyzeInstArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inn const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; if (!is_int and !(is_float and floatOpAllowed(inst.base.tag))) { - return mod.fail(scope, inst.base.src, "invalid operands to binary expression: '{}' and '{}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); + return mod.fail(scope, inst.base.src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); } if (casted_lhs.value()) |lhs_val| { @@ -1656,7 +1656,7 @@ fn analyzeInstArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inn const ir_tag = switch (inst.base.tag) { .add => Inst.Tag.add, .sub => Inst.Tag.sub, - else => return mod.fail(scope, inst.base.src, "TODO implement arithmetic for operand '{}''", .{@tagName(inst.base.tag)}), + else => return mod.fail(scope, inst.base.src, "TODO implement arithmetic for operand '{s}''", .{@tagName(inst.base.tag)}), }; return mod.addBinOp(b, inst.base.src, scalar_type, ir_tag, casted_lhs, casted_rhs); @@ -1689,7 +1689,7 @@ fn analyzeInstComptimeOp(mod: *Module, scope: *Scope, res_type: Type, inst: *zir mod.floatSub(scope, res_type, inst.base.src, lhs_val, rhs_val); break :blk val; }, - else => return mod.fail(scope, inst.base.src, "TODO Implement arithmetic operand '{}'", .{@tagName(inst.base.tag)}), + else => return mod.fail(scope, inst.base.src, "TODO Implement arithmetic operand '{s}'", .{@tagName(inst.base.tag)}), }; return mod.constInst(scope, inst.base.src, .{ @@ -1781,7 +1781,7 @@ fn analyzeInstCmp( return mod.fail(scope, inst.base.src, "TODO implement equality comparison between a union's tag value and an enum literal", .{}); } else if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) { if (!is_equality_cmp) { - return mod.fail(scope, inst.base.src, "{} operator not allowed for errors", .{@tagName(op)}); + return mod.fail(scope, inst.base.src, "{s} operator not allowed for errors", .{@tagName(op)}); } return mod.fail(scope, inst.base.src, "TODO implement equality comparison between errors", .{}); } else if (lhs.ty.isNumeric() and rhs.ty.isNumeric()) { @@ -1962,7 +1962,7 @@ fn analyzeDeclVal(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerErr const decl_name = inst.positionals.name; const 
zir_module = scope.namespace().cast(Scope.ZIRModule).?; const src_decl = zir_module.contents.module.findDecl(decl_name) orelse - return mod.fail(scope, inst.base.src, "use of undeclared identifier '{}'", .{decl_name}); + return mod.fail(scope, inst.base.src, "use of undeclared identifier '{s}'", .{decl_name}); const decl = try resolveCompleteZirDecl(mod, scope, src_decl.decl); -- cgit v1.2.3 From 9362f382ab7023592cc1d71044217b847b122406 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 2 Jan 2021 12:32:30 -0700 Subject: stage2: implement function call inlining in the frontend * remove the -Ddump-zir thing. that's handled through --verbose-ir * rework Fn to have an is_inline flag without requiring any more memory on the heap per function. * implement a rough first version of dumping typed zir (tzir) which is a lot more helpful for debugging than what we had before. We don't have a way to parse it though. * keep track of whether the inline-ness of a function changes because if it does we have to go update callsites. * add compile error for inline and export used together. inline function calls and comptime function calls are implemented the same way. A block instruction is set up to capture the result, and then a scope is set up that has a flag for is_comptime and some state if the scope is being inlined. when analyzing `ret` instructions, zig looks for inlining state in the scope, and if found, treats `ret` as a `break` instruction instead, with the target block being the one set up at the inline callsite. Follow-up items: * Complete out the debug TZIR dumping code. * Don't redundantly generate ZIR for each inline/comptime function call. Instead we should add a new state enum tag to Fn. * comptime and inlining branch quotas. * Add more test cases. --- build.zig | 2 - src/Compilation.zig | 14 +- src/Module.zig | 150 ++++++++++++----- src/codegen.zig | 10 +- src/codegen/c.zig | 2 +- src/codegen/wasm.zig | 2 +- src/config.zig.in | 1 - src/link/Elf.zig | 10 -- src/link/MachO/DebugSymbols.zig | 10 -- src/llvm_backend.zig | 2 +- src/zir.zig | 349 ++++++++++++++++++++++++++++------------ src/zir_sema.zig | 239 ++++++++++++++++++--------- test/stage2/zir.zig | 12 +- 13 files changed, 549 insertions(+), 254 deletions(-) (limited to 'src/codegen.zig') diff --git a/build.zig b/build.zig index 2b0685d19e..f86b0d3bec 100644 --- a/build.zig +++ b/build.zig @@ -220,7 +220,6 @@ pub fn build(b: *Builder) !void { } const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{}; - const zir_dumps = b.option([]const []const u8, "dump-zir", "Which functions to dump ZIR for before codegen") orelse &[0][]const u8{}; const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. 
Default is to find out with git."); const version = if (opt_version_string) |version| version else v: { @@ -277,7 +276,6 @@ pub fn build(b: *Builder) !void { exe.addBuildOption(std.SemanticVersion, "semver", semver); exe.addBuildOption([]const []const u8, "log_scopes", log_scopes); - exe.addBuildOption([]const []const u8, "zir_dumps", zir_dumps); exe.addBuildOption(bool, "enable_tracy", tracy != null); exe.addBuildOption(bool, "is_stage1", is_stage1); if (tracy) |tracy_path| { diff --git a/src/Compilation.zig b/src/Compilation.zig index de115b9b40..a6f39a3154 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1459,10 +1459,10 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; if (decl.typed_value.most_recent.typed_value.val.castTag(.function)) |payload| { const func = payload.data; - switch (func.analysis) { + switch (func.bits.state) { .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { - assert(func.analysis != .in_progress); + assert(func.bits.state != .in_progress); continue; }, error.OutOfMemory => return error.OutOfMemory, @@ -1471,12 +1471,16 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .sema_failure, .dependency_failure => continue, .success => {}, } - // Here we tack on additional allocations to the Decl's arena. The allocations are - // lifetime annotations in the ZIR. + // Here we tack on additional allocations to the Decl's arena. The allocations + // are lifetime annotations in the ZIR. var decl_arena = decl.typed_value.most_recent.arena.?.promote(module.gpa); defer decl.typed_value.most_recent.arena.?.* = decl_arena.state; log.debug("analyze liveness of {s}\n", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, func.analysis.success); + try liveness.analyze(module.gpa, &decl_arena.allocator, func.data.body); + + if (self.verbose_ir) { + func.dump(module.*); + } } assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits()); diff --git a/src/Module.zig b/src/Module.zig index 29c19c09a0..db76ecd5db 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -286,23 +286,40 @@ pub const Decl = struct { /// Extern functions do not have this data structure; they are represented by /// the `Decl` only, with a `Value` tag of `extern_fn`. pub const Fn = struct { - /// This memory owned by the Decl's TypedValue.Managed arena allocator. - analysis: union(enum) { + bits: packed struct { + /// Get and set this field via `analysis` and `setAnalysis`. + state: Analysis.Tag, + /// We carry this state into `Fn` instead of leaving it in the AST so that + /// analysis of function calls can happen even on functions whose AST has + /// been unloaded from memory. + is_inline: bool, + unused_bits: u4 = 0, + }, + /// Get and set this data via `analysis` and `setAnalysis`. + data: union { + none: void, + zir: *ZIR, + body: Body, + }, + owner_decl: *Decl, + + pub const Analysis = union(Tag) { queued: *ZIR, in_progress, - /// There will be a corresponding ErrorMsg in Module.failed_decls sema_failure, - /// This Fn might be OK but it depends on another Decl which did not successfully complete - /// semantic analysis. dependency_failure, success: Body, - }, - owner_decl: *Decl, - /// This memory is temporary and points to stack memory for the duration - /// of Fn analysis. 
- pub const Analysis = struct { - inner_block: Scope.Block, + pub const Tag = enum(u3) { + queued, + in_progress, + /// There will be a corresponding ErrorMsg in Module.failed_decls + sema_failure, + /// This Fn might be OK but it depends on another Decl which did not + /// successfully complete semantic analysis. + dependency_failure, + success, + }; }; /// Contains un-analyzed ZIR instructions generated from Zig source AST. @@ -311,22 +328,37 @@ pub const Fn = struct { arena: std.heap.ArenaAllocator.State, }; - /// For debugging purposes. - pub fn dump(self: *Fn, mod: Module) void { - std.debug.print("Module.Function(name={s}) ", .{self.owner_decl.name}); - switch (self.analysis) { - .queued => { - std.debug.print("queued\n", .{}); + pub fn analysis(self: Fn) Analysis { + return switch (self.bits.state) { + .queued => .{ .queued = self.data.zir }, + .success => .{ .success = self.data.body }, + .in_progress => .in_progress, + .sema_failure => .sema_failure, + .dependency_failure => .dependency_failure, + }; + } + + pub fn setAnalysis(self: *Fn, anal: Analysis) void { + switch (anal) { + .queued => |zir_ptr| { + self.bits.state = .queued; + self.data = .{ .zir = zir_ptr }; }, - .in_progress => { - std.debug.print("in_progress\n", .{}); + .success => |body| { + self.bits.state = .success; + self.data = .{ .body = body }; }, - else => { - std.debug.print("\n", .{}); - zir.dumpFn(mod, self); + .in_progress, .sema_failure, .dependency_failure => { + self.bits.state = anal; + self.data = .{ .none = {} }; }, } } + + /// For debugging purposes. + pub fn dump(self: *Fn, mod: Module) void { + zir.dumpFn(mod, self); + } }; pub const Var = struct { @@ -773,13 +805,33 @@ pub const Scope = struct { instructions: ArrayListUnmanaged(*Inst), /// Points to the arena allocator of DeclAnalysis arena: *Allocator, - label: ?Label = null, + label: Label = Label.none, is_comptime: bool, - pub const Label = struct { - zir_block: *zir.Inst.Block, - results: ArrayListUnmanaged(*Inst), - block_inst: *Inst.Block, + pub const Label = union(enum) { + none, + /// This `Block` maps a block ZIR instruction to the corresponding + /// TZIR instruction for break instruction analysis. + breaking: struct { + zir_block: *zir.Inst.Block, + merges: Merges, + }, + /// This `Block` indicates that an inline function call is happening + /// and return instructions should be analyzed as a break instruction + /// to this TZIR block instruction. + inlining: struct { + /// We use this to count from 0 so that arg instructions know + /// which parameter index they are, without having to store + /// a parameter index with each arg instruction. + param_index: usize, + casted_args: []*Inst, + merges: Merges, + }, + + pub const Merges = struct { + results: ArrayListUnmanaged(*Inst), + block_inst: *Inst.Block, + }; }; /// For debugging purposes. 
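[Aside, not part of the patch] In source terms, the Label.inlining machinery above amounts to roughly the following rewrite (an illustrative analogy with made-up functions, not actual compiler output): every return in the inlined callee is analyzed as a break into a result-capturing block set up at the callsite, as the commit message describes.

// Before: an inline function with an early return.
inline fn add(a: u32, b: u32) u32 {
    if (a == 0) return b; // an early `ret`
    return a + b;
}

pub fn caller(x: u32) u32 {
    // The frontend analyzes this call as if it were written like the
    // labeled block in callerAfterInlining below.
    return add(x, 7);
}

// After: each `return` in the callee becomes a `break` whose target is
// the block that captures the result at the callsite.
pub fn callerAfterInlining(x: u32) u32 {
    const result = blk: {
        const a = x;
        const b: u32 = 7;
        if (a == 0) break :blk b;
        break :blk a + b;
    };
    return result;
}
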
@@ -1189,8 +1241,21 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { break :blk fn_zir; }; + const is_inline = blk: { + if (fn_proto.getExternExportInlineToken()) |maybe_inline_token| { + if (tree.token_ids[maybe_inline_token] == .Keyword_inline) { + break :blk true; + } + } + break :blk false; + }; + new_func.* = .{ - .analysis = .{ .queued = fn_zir }, + .bits = .{ + .state = .queued, + .is_inline = is_inline, + }, + .data = .{ .zir = fn_zir }, .owner_decl = decl, }; fn_payload.* = .{ @@ -1199,11 +1264,16 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { }; var prev_type_has_bits = false; + var prev_is_inline = false; var type_changed = true; if (decl.typedValueManaged()) |tvm| { prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits(); type_changed = !tvm.typed_value.ty.eql(fn_type); + if (tvm.typed_value.val.castTag(.function)) |payload| { + const prev_func = payload.data; + prev_is_inline = prev_func.bits.is_inline; + } tvm.deinit(self.gpa); } @@ -1221,18 +1291,26 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { decl.analysis = .complete; decl.generation = self.generation; - if (fn_type.hasCodeGenBits()) { + if (!is_inline and fn_type.hasCodeGenBits()) { // We don't fully codegen the decl until later, but we do need to reserve a global // offset table index for it. This allows us to codegen decls out of dependency order, // increasing how many computations can be done in parallel. try self.comp.bin_file.allocateDeclIndexes(decl); try self.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - } else if (prev_type_has_bits) { + } else if (!prev_is_inline and prev_type_has_bits) { self.comp.bin_file.freeDecl(decl); } if (fn_proto.getExternExportInlineToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { + if (is_inline) { + return self.failTok( + &block_scope.base, + maybe_export_token, + "export of inline function", + .{}, + ); + } const export_src = tree.token_locs[maybe_export_token].start; const name_loc = tree.token_locs[fn_proto.getNameToken().?]; const name = tree.tokenSliceLoc(name_loc); @@ -1240,7 +1318,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { try self.analyzeExport(&block_scope.base, export_src, name, decl); } } - return type_changed; + return type_changed or is_inline != prev_is_inline; }, .VarDecl => { const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", ast_node); @@ -1824,15 +1902,15 @@ pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void { }; defer inner_block.instructions.deinit(self.gpa); - const fn_zir = func.analysis.queued; + const fn_zir = func.data.zir; defer fn_zir.arena.promote(self.gpa).deinit(); - func.analysis = .{ .in_progress = {} }; + func.setAnalysis(.in_progress); log.debug("set {s} to in_progress\n", .{decl.name}); try zir_sema.analyzeBody(self, &inner_block.base, fn_zir.body); const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items); - func.analysis = .{ .success = .{ .instructions = instructions } }; + func.setAnalysis(.{ .success = .{ .instructions = instructions } }); log.debug("set {s} to success\n", .{decl.name}); } @@ -2329,7 +2407,7 @@ pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) Inn self.ensureDeclAnalyzed(decl) catch |err| { if (scope.cast(Scope.Block)) |block| { if (block.func) |func| { - func.analysis = .dependency_failure; + func.setAnalysis(.dependency_failure); } else { block.decl.analysis = .dependency_failure; } @@ -3029,7 +3107,7 @@ fn 
failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Com .block => { const block = scope.cast(Scope.Block).?; if (block.func) |func| { - func.analysis = .sema_failure; + func.setAnalysis(.sema_failure); } else { block.decl.analysis = .sema_failure; block.decl.generation = self.generation; diff --git a/src/codegen.zig b/src/codegen.zig index 6530b687e5..588c3dec4c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -532,7 +532,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.analysis.success); + try self.genBody(self.mod_fn.data.body); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -576,7 +576,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.analysis.success); + try self.genBody(self.mod_fn.data.body); try self.dbgSetEpilogueBegin(); } }, @@ -593,7 +593,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.analysis.success); + try self.genBody(self.mod_fn.data.body); // Backpatch stack offset const stack_end = self.max_end_stack; @@ -638,13 +638,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.analysis.success); + try self.genBody(self.mod_fn.data.body); try self.dbgSetEpilogueBegin(); } }, else => { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.analysis.success); + try self.genBody(self.mod_fn.data.body); try self.dbgSetEpilogueBegin(); }, } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 684a03eb79..712d663af0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -275,7 +275,7 @@ pub fn generate(file: *C, module: *Module, decl: *Decl) !void { try writer.writeAll(" {"); const func: *Module.Fn = func_payload.data; - const instructions = func.analysis.success.instructions; + const instructions = func.data.body.instructions; if (instructions.len > 0) { try writer.writeAll("\n"); for (instructions) |inst| { diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index c7ad59f5d1..1eb4f5bc29 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -63,7 +63,7 @@ pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void { // TODO: check for and handle death of instructions const tv = decl.typed_value.most_recent.typed_value; const mod_fn = tv.val.castTag(.function).?.data; - for (mod_fn.analysis.success.instructions) |inst| try genInst(buf, decl, inst); + for (mod_fn.data.body.instructions) |inst| try genInst(buf, decl, inst); // Write 'end' opcode try writer.writeByte(0x0B); diff --git a/src/config.zig.in b/src/config.zig.in index 9d16cf3824..0dbd3f3c91 100644 --- a/src/config.zig.in +++ b/src/config.zig.in @@ -2,7 +2,6 @@ pub const have_llvm = true; pub const version: [:0]const u8 = "@ZIG_VERSION@"; pub const semver = try @import("std").SemanticVersion.parse(version); pub const log_scopes: []const []const u8 = &[_][]const u8{}; -pub const zir_dumps: []const []const u8 = &[_][]const u8{}; pub const enable_tracy = false; pub const is_stage1 = true; pub const skip_non_native = false; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 116d7c9859..d74236f8c1 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2178,16 +2178,6 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: 
*Module.Decl) !void { else => false, }; if (is_fn) { - const zir_dumps = if (std.builtin.is_test) &[0][]const u8{} else build_options.zir_dumps; - if (zir_dumps.len != 0) { - for (zir_dumps) |fn_name| { - if (mem.eql(u8, mem.spanZ(decl.name), fn_name)) { - std.debug.print("\n{s}\n", .{decl.name}); - typed_value.val.castTag(.function).?.data.dump(module.*); - } - } - } - // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureCapacity(26); diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index c70fcc5825..11f87d5495 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -936,16 +936,6 @@ pub fn initDeclDebugBuffers( const typed_value = decl.typed_value.most_recent.typed_value; switch (typed_value.ty.zigTypeTag()) { .Fn => { - const zir_dumps = if (std.builtin.is_test) &[0][]const u8{} else build_options.zir_dumps; - if (zir_dumps.len != 0) { - for (zir_dumps) |fn_name| { - if (mem.eql(u8, mem.spanZ(decl.name), fn_name)) { - std.debug.print("\n{}\n", .{decl.name}); - typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*); - } - } - } - // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureCapacity(26); diff --git a/src/llvm_backend.zig b/src/llvm_backend.zig index 51d1a0840e..5814aa7e7e 100644 --- a/src/llvm_backend.zig +++ b/src/llvm_backend.zig @@ -294,7 +294,7 @@ pub const LLVMIRModule = struct { const entry_block = llvm_func.appendBasicBlock("Entry"); self.builder.positionBuilderAtEnd(entry_block); - const instructions = func.analysis.success.instructions; + const instructions = func.data.body.instructions; for (instructions) |inst| { switch (inst.tag) { .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?), diff --git a/src/zir.zig b/src/zir.zig index 043c54faf0..64b74f24d9 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -793,7 +793,9 @@ pub const Inst = struct { fn_type: *Inst, body: Module.Body, }, - kw_args: struct {}, + kw_args: struct { + is_inline: bool = false, + }, }; pub const FnType = struct { @@ -1847,83 +1849,258 @@ pub fn emit(allocator: *Allocator, old_module: *IrModule) !Module { /// For debugging purposes, prints a function representation to stderr. 
pub fn dumpFn(old_module: IrModule, module_fn: *IrModule.Fn) void { const allocator = old_module.gpa; - var ctx: EmitZIR = .{ + var ctx: DumpTzir = .{ .allocator = allocator, - .decls = .{}, .arena = std.heap.ArenaAllocator.init(allocator), .old_module = &old_module, - .next_auto_name = 0, - .names = std.StringArrayHashMap(void).init(allocator), - .primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator), - .indent = 0, - .block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator), - .loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator), - .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator), - .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator), + .module_fn = module_fn, + .indent = 2, + .inst_table = DumpTzir.InstTable.init(allocator), + .partial_inst_table = DumpTzir.InstTable.init(allocator), + .const_table = DumpTzir.InstTable.init(allocator), }; - defer ctx.metadata.deinit(); - defer ctx.body_metadata.deinit(); - defer ctx.block_table.deinit(); - defer ctx.loop_table.deinit(); - defer ctx.decls.deinit(allocator); - defer ctx.names.deinit(); - defer ctx.primitive_table.deinit(); + defer ctx.inst_table.deinit(); + defer ctx.partial_inst_table.deinit(); + defer ctx.const_table.deinit(); defer ctx.arena.deinit(); - const fn_ty = module_fn.owner_decl.typed_value.most_recent.typed_value.ty; - _ = ctx.emitFn(module_fn, 0, fn_ty) catch |err| { - std.debug.print("unable to dump function: {s}\n", .{@errorName(err)}); - return; - }; - var module = Module{ - .decls = ctx.decls.items, - .arena = ctx.arena, - .metadata = ctx.metadata, - .body_metadata = ctx.body_metadata, - }; - - module.dump(); + switch (module_fn.analysis()) { + .queued => std.debug.print("(queued)", .{}), + .in_progress => std.debug.print("(in_progress)", .{}), + .sema_failure => std.debug.print("(sema_failure)", .{}), + .dependency_failure => std.debug.print("(dependency_failure)", .{}), + .success => |body| { + ctx.dump(body, std.io.getStdErr().writer()) catch @panic("failed to dump TZIR"); + }, + } } -/// For debugging purposes, prints a function representation to stderr. 
-pub fn dumpBlock(old_module: IrModule, module_block: *IrModule.Scope.Block) void { - const allocator = old_module.gpa; - var ctx: EmitZIR = .{ - .allocator = allocator, - .decls = .{}, - .arena = std.heap.ArenaAllocator.init(allocator), - .old_module = &old_module, - .next_auto_name = 0, - .names = std.StringArrayHashMap(void).init(allocator), - .primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator), - .indent = 0, - .block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator), - .loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator), - .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator), - .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator), - }; - defer ctx.metadata.deinit(); - defer ctx.body_metadata.deinit(); - defer ctx.block_table.deinit(); - defer ctx.loop_table.deinit(); - defer ctx.decls.deinit(allocator); - defer ctx.names.deinit(); - defer ctx.primitive_table.deinit(); - defer ctx.arena.deinit(); +const DumpTzir = struct { + allocator: *Allocator, + arena: std.heap.ArenaAllocator, + old_module: *const IrModule, + module_fn: *IrModule.Fn, + indent: usize, + inst_table: InstTable, + partial_inst_table: InstTable, + const_table: InstTable, + next_index: usize = 0, + next_partial_index: usize = 0, + next_const_index: usize = 0, + + const InstTable = std.AutoArrayHashMap(*ir.Inst, usize); + + fn dump(dtz: *DumpTzir, body: ir.Body, writer: std.fs.File.Writer) !void { + // First pass to pre-populate the table so that we can show even invalid references. + // Must iterate the same order we iterate the second time. + // We also look for constants and put them in the const_table. + for (body.instructions) |inst| { + try dtz.inst_table.put(inst, dtz.next_index); + dtz.next_index += 1; + switch (inst.tag) { + .alloc, + .retvoid, + .unreach, + .breakpoint, + .dbg_stmt, + => {}, - _ = ctx.emitBlock(module_block, 0) catch |err| { - std.debug.print("unable to dump function: {}\n", .{err}); - return; - }; - var module = Module{ - .decls = ctx.decls.items, - .arena = ctx.arena, - .metadata = ctx.metadata, - .body_metadata = ctx.body_metadata, - }; + .ref, + .ret, + .bitcast, + .not, + .isnonnull, + .isnull, + .iserr, + .ptrtoint, + .floatcast, + .intcast, + .load, + .unwrap_optional, + .wrap_optional, + => { + const un_op = inst.cast(ir.Inst.UnOp).?; + try dtz.findConst(un_op.operand); + }, - module.dump(); -} + .add, + .sub, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .store, + .booland, + .boolor, + .bitand, + .bitor, + .xor, + => { + const bin_op = inst.cast(ir.Inst.BinOp).?; + try dtz.findConst(bin_op.lhs); + try dtz.findConst(bin_op.rhs); + }, + + .arg => {}, + + // TODO fill out this debug printing + .assembly, + .block, + .br, + .brvoid, + .call, + .condbr, + .constant, + .loop, + .varptr, + .switchbr, + => {}, + } + } + + std.debug.print("Module.Function(name={s}):\n", .{dtz.module_fn.owner_decl.name}); + + for (dtz.const_table.items()) |entry| { + const constant = entry.key.castTag(.constant).?; + try writer.print(" @{d}: {} = {};\n", .{ + entry.value, constant.base.ty, constant.val, + }); + } + + return dtz.dumpBody(body, writer); + } + + fn dumpBody(dtz: *DumpTzir, body: ir.Body, writer: std.fs.File.Writer) !void { + for (body.instructions) |inst| { + const my_index = dtz.next_partial_index; + try dtz.partial_inst_table.put(inst, my_index); + dtz.next_partial_index += 1; + + try writer.writeByteNTimes(' ', dtz.indent); + try writer.print("%{d}: {} = 
{s}(", .{ + my_index, inst.ty, @tagName(inst.tag), + }); + switch (inst.tag) { + .alloc, + .retvoid, + .unreach, + .breakpoint, + .dbg_stmt, + => try writer.writeAll(")\n"), + + .ref, + .ret, + .bitcast, + .not, + .isnonnull, + .isnull, + .iserr, + .ptrtoint, + .floatcast, + .intcast, + .load, + .unwrap_optional, + .wrap_optional, + => { + const un_op = inst.cast(ir.Inst.UnOp).?; + if (dtz.partial_inst_table.get(un_op.operand)) |operand_index| { + try writer.print("%{d})\n", .{operand_index}); + } else if (dtz.const_table.get(un_op.operand)) |operand_index| { + try writer.print("@{d})\n", .{operand_index}); + } else if (dtz.inst_table.get(un_op.operand)) |operand_index| { + try writer.print("%{d}) // Instruction does not dominate all uses!\n", .{ + operand_index, + }); + } else { + try writer.writeAll("!BADREF!)\n"); + } + }, + + .add, + .sub, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .store, + .booland, + .boolor, + .bitand, + .bitor, + .xor, + => { + var lhs_kinky: ?usize = null; + var rhs_kinky: ?usize = null; + + const bin_op = inst.cast(ir.Inst.BinOp).?; + if (dtz.partial_inst_table.get(bin_op.lhs)) |operand_index| { + try writer.print("%{d}, ", .{operand_index}); + } else if (dtz.const_table.get(bin_op.lhs)) |operand_index| { + try writer.print("@{d}, ", .{operand_index}); + } else if (dtz.inst_table.get(bin_op.lhs)) |operand_index| { + lhs_kinky = operand_index; + try writer.print("%{d}, ", .{operand_index}); + } else { + try writer.writeAll("!BADREF!, "); + } + if (dtz.partial_inst_table.get(bin_op.rhs)) |operand_index| { + try writer.print("%{d}", .{operand_index}); + } else if (dtz.const_table.get(bin_op.rhs)) |operand_index| { + try writer.print("@{d}", .{operand_index}); + } else if (dtz.inst_table.get(bin_op.rhs)) |operand_index| { + rhs_kinky = operand_index; + try writer.print("%{d}", .{operand_index}); + } else { + try writer.writeAll("!BADREF!"); + } + if (lhs_kinky != null or rhs_kinky != null) { + try writer.writeAll(") // Instruction does not dominate all uses!"); + if (lhs_kinky) |lhs| { + try writer.print(" %{d}", .{lhs}); + } + if (rhs_kinky) |rhs| { + try writer.print(" %{d}", .{rhs}); + } + try writer.writeAll("\n"); + } else { + try writer.writeAll(")\n"); + } + }, + + .arg => { + const arg = inst.castTag(.arg).?; + try writer.print("{s})\n", .{arg.name}); + }, + + // TODO fill out this debug printing + .assembly, + .block, + .br, + .brvoid, + .call, + .condbr, + .constant, + .loop, + .varptr, + .switchbr, + => { + try writer.writeAll("!TODO!)\n"); + }, + } + } + } + + fn findConst(dtz: *DumpTzir, operand: *ir.Inst) !void { + if (operand.tag == .constant) { + try dtz.const_table.put(operand, dtz.next_const_index); + dtz.next_const_index += 1; + } + } +}; const EmitZIR = struct { allocator: *Allocator, @@ -2105,36 +2282,6 @@ const EmitZIR = struct { return &declref_inst.base; } - fn emitBlock(self: *EmitZIR, module_block: *IrModule.Scope.Block, src: usize) Allocator.Error!*Decl { - var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator); - defer inst_table.deinit(); - - var instructions = std.ArrayList(*Inst).init(self.allocator); - defer instructions.deinit(); - - const body: ir.Body = .{ .instructions = module_block.instructions.items }; - try self.emitBody(body, &inst_table, &instructions); - - const fn_type = try self.emitType(src, Type.initTag(.void)); - - const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len); - mem.copy(*Inst, arena_instrs, instructions.items); - - const fn_inst = try 
self.arena.allocator.create(Inst.Fn); - fn_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Fn.base_tag, - }, - .positionals = .{ - .fn_type = fn_type.inst, - .body = .{ .instructions = arena_instrs }, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&fn_inst.base); - } - fn emitFn(self: *EmitZIR, module_fn: *IrModule.Fn, src: usize, ty: Type) Allocator.Error!*Decl { var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator); defer inst_table.deinit(); @@ -2142,7 +2289,7 @@ const EmitZIR = struct { var instructions = std.ArrayList(*Inst).init(self.allocator); defer instructions.deinit(); - switch (module_fn.analysis) { + switch (module_fn.analysis()) { .queued => unreachable, .in_progress => unreachable, .success => |body| { @@ -2224,7 +2371,9 @@ const EmitZIR = struct { .fn_type = fn_type.inst, .body = .{ .instructions = arena_instrs }, }, - .kw_args = .{}, + .kw_args = .{ + .is_inline = module_fn.bits.is_inline, + }, }; return self.emitUnnamedDecl(&fn_inst.base); } diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 96a19f20f4..9365996bb6 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -577,7 +577,15 @@ fn analyzeInstCompileError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) In } fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*Inst { - const b = try mod.requireRuntimeBlock(scope, inst.base.src); + const b = try mod.requireFunctionBlock(scope, inst.base.src); + switch (b.label) { + .none, .breaking => {}, + .inlining => |*inlining| { + const param_index = inlining.param_index; + inlining.param_index += 1; + return inlining.casted_args[param_index]; + }, + } const fn_ty = b.func.?.owner_decl.typed_value.most_recent.typed_value.ty; const param_index = b.instructions.items.len; const param_count = fn_ty.fnParamLen(); @@ -636,7 +644,7 @@ fn analyzeInstBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_c .decl = parent_block.decl, .instructions = .{}, .arena = parent_block.arena, - .label = null, + .label = .none, .is_comptime = parent_block.is_comptime or is_comptime, }; defer child_block.instructions.deinit(mod.gpa); @@ -674,41 +682,56 @@ fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_compt .decl = parent_block.decl, .instructions = .{}, .arena = parent_block.arena, - // TODO @as here is working around a stage1 miscompilation bug :( - .label = @as(?Scope.Block.Label, Scope.Block.Label{ - .zir_block = inst, - .results = .{}, - .block_inst = block_inst, - }), + .label = Scope.Block.Label{ + .breaking = .{ + .zir_block = inst, + .merges = .{ + .results = .{}, + .block_inst = block_inst, + }, + }, + }, .is_comptime = is_comptime or parent_block.is_comptime, }; - const label = &child_block.label.?; + const merges = &child_block.label.breaking.merges; defer child_block.instructions.deinit(mod.gpa); - defer label.results.deinit(mod.gpa); + defer merges.results.deinit(mod.gpa); try analyzeBody(mod, &child_block.base, inst.positionals.body); + return analyzeBlockBody(mod, scope, &child_block, merges); +} + +fn analyzeBlockBody( + mod: *Module, + scope: *Scope, + child_block: *Scope.Block, + merges: *Scope.Block.Label.Merges, +) InnerError!*Inst { + const parent_block = scope.cast(Scope.Block).?; + // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn()); - if (label.results.items.len == 0) { - // No need for a block instruction. 
We can put the new instructions directly into the parent block. + if (merges.results.items.len == 0) { + // No need for a block instruction. We can put the new instructions + // directly into the parent block. const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items); try parent_block.instructions.appendSlice(mod.gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } - if (label.results.items.len == 1) { + if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; const last_inst = child_block.instructions.items[last_inst_index]; if (last_inst.breakBlock()) |br_block| { - if (br_block == block_inst) { + if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly into the parent block. // Here we omit the break instruction. const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]); try parent_block.instructions.appendSlice(mod.gpa, copied_instructions); - return label.results.items[0]; + return merges.results.items[0]; } } } @@ -717,10 +740,10 @@ fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_compt // Need to set the type and emit the Block instruction. This allows machine code generation // to emit a jump instruction to after the block when it encounters the break. - try parent_block.instructions.append(mod.gpa, &block_inst.base); - block_inst.base.ty = try mod.resolvePeerTypes(scope, label.results.items); - block_inst.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) }; - return &block_inst.base; + try parent_block.instructions.append(mod.gpa, &merges.block_inst.base); + merges.block_inst.base.ty = try mod.resolvePeerTypes(scope, merges.results.items); + merges.block_inst.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) }; + return &merges.block_inst.base; } fn analyzeInstBreakpoint(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { @@ -829,14 +852,32 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError const ret_type = func.ty.fnReturnType(); const b = try mod.requireFunctionBlock(scope, inst.base.src); - if (b.is_comptime) { - const fn_val = try mod.resolveConstValue(scope, func); - const module_fn = switch (fn_val.tag()) { - .function => fn_val.castTag(.function).?.data, - .extern_fn => return mod.fail(scope, inst.base.src, "comptime call of extern function", .{}), + const is_comptime_call = b.is_comptime or inst.kw_args.modifier == .compile_time; + const is_inline_call = is_comptime_call or inst.kw_args.modifier == .always_inline or blk: { + // This logic will get simplified by + // https://github.com/ziglang/zig/issues/6429 + if (try mod.resolveDefinedValue(scope, func)) |func_val| { + const module_fn = switch (func_val.tag()) { + .function => func_val.castTag(.function).?.data, + else => break :blk false, + }; + break :blk module_fn.bits.is_inline; + } + break :blk false; + }; + if (is_inline_call) { + const func_val = try mod.resolveConstValue(scope, func); + const module_fn = switch (func_val.tag()) { + .function => func_val.castTag(.function).?.data, + .extern_fn => return mod.fail(scope, inst.base.src, "{s} call of extern function", .{ + @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + }), else => unreachable, }; const callee_decl = module_fn.owner_decl; + // TODO: De-duplicate this 
with the code in Module.zig that generates + // ZIR for the same function and re-use the same ZIR for runtime function + // generation and for inline/comptime calls. const callee_file_scope = callee_decl.getFileScope(); const tree = mod.getAstTree(callee_file_scope) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -859,23 +900,31 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError }; defer gen_scope.instructions.deinit(mod.gpa); - // Add a const instruction for each parameter. + // We need an instruction for each parameter, and they must be first in the body. + try gen_scope.instructions.resize(mod.gpa, fn_proto.params_len); var params_scope = &gen_scope.base; for (fn_proto.params()) |param, i| { const name_token = param.name_token.?; const src = tree.token_locs[name_token].start; const param_name = try mod.identifierTokenString(scope, name_token); - const arg_val = try mod.resolveConstValue(scope, casted_args[i]); - const arg = try astgen.addZIRInstConst(mod, params_scope, src, .{ - .ty = casted_args[i].ty, - .val = arg_val, - }); + const arg = try call_arena.allocator.create(zir.Inst.Arg); + arg.* = .{ + .base = .{ + .tag = .arg, + .src = src, + }, + .positionals = .{ + .name = param_name, + }, + .kw_args = .{}, + }; + gen_scope.instructions.items[i] = &arg.base; const sub_scope = try call_arena.allocator.create(Scope.LocalVal); sub_scope.* = .{ .parent = params_scope, .gen_zir = &gen_scope, .name = param_name, - .inst = arg, + .inst = &arg.base, }; params_scope = &sub_scope.base; } @@ -896,42 +945,52 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError zir.dumpZir(mod.gpa, "fn_body_callee", callee_decl.name, gen_scope.instructions.items) catch {}; } - // Analyze the ZIR. - var inner_block: Scope.Block = .{ + // Analyze the ZIR. The same ZIR gets analyzed into a runtime function + // or an inlined call depending on what union tag the `label` field is + // set to in the `Scope.Block`. + // This block instruction will be used to capture the return value from the + // inlined function. + const block_inst = try scope.arena().create(Inst.Block); + block_inst.* = .{ + .base = .{ + .tag = Inst.Block.base_tag, + .ty = ret_type, + .src = inst.base.src, + }, + .body = undefined, + }; + var child_block: Scope.Block = .{ .parent = null, .func = module_fn, - .decl = callee_decl, + // Note that we pass the caller's Decl, not the callee. This causes + // compile errors to be attached (correctly) to the caller's Decl. + .decl = scope.decl().?, .instructions = .{}, - .arena = &call_arena.allocator, - .is_comptime = true, + .arena = scope.arena(), + .label = Scope.Block.Label{ + .inlining = .{ + .param_index = 0, + .casted_args = casted_args, + .merges = .{ + .results = .{}, + .block_inst = block_inst, + }, + }, + }, + .is_comptime = is_comptime_call, }; - defer inner_block.instructions.deinit(mod.gpa); + const merges = &child_block.label.inlining.merges; + + defer child_block.instructions.deinit(mod.gpa); + defer merges.results.deinit(mod.gpa); - // TODO make sure compile errors that happen from this analyzeBody are reported correctly - // and attach to the caller Decl not the callee. - try analyzeBody(mod, &inner_block.base, .{ + // This will have return instructions analyzed as break instructions to + // the block_inst above. 
+ try analyzeBody(mod, &child_block.base, .{ .instructions = gen_scope.instructions.items, }); - if (mod.comp.verbose_ir) { - inner_block.dump(mod.*); - } - - assert(inner_block.instructions.items.len == 1); - const only_inst = inner_block.instructions.items[0]; - switch (only_inst.tag) { - .ret => { - const ret_inst = only_inst.castTag(.ret).?; - const operand = ret_inst.operand; - const callee_arena = scope.arena(); - return mod.constInst(scope, inst.base.src, .{ - .ty = try operand.ty.copy(callee_arena), - .val = try operand.value().?.copy(callee_arena), - }); - }, - .retvoid => return mod.constVoid(scope, inst.base.src), - else => unreachable, - } + return analyzeBlockBody(mod, scope, &child_block, merges); } return mod.addCall(b, inst.base.src, ret_type, func, casted_args); @@ -954,7 +1013,11 @@ fn analyzeInstFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError! }; const new_func = try scope.arena().create(Module.Fn); new_func.* = .{ - .analysis = .{ .queued = fn_zir }, + .bits = .{ + .state = .queued, + .is_inline = fn_inst.kw_args.is_inline, + }, + .data = .{ .zir = fn_zir }, .owner_decl = scope.decl().?, }; return mod.constInst(scope, fn_inst.base.src, .{ @@ -2020,21 +2083,41 @@ fn analyzeInstUnreachable( fn analyzeInstRet(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const operand = try resolveInst(mod, scope, inst.positionals.operand); const b = try mod.requireFunctionBlock(scope, inst.base.src); - return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, operand); + + switch (b.label) { + .inlining => |*inlining| { + // We are inlining a function call; rewrite the `ret` as a `break`. + try inlining.merges.results.append(mod.gpa, operand); + return mod.addBr(b, inst.base.src, inlining.merges.block_inst, operand); + }, + .none, .breaking => { + return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, operand); + }, + } } fn analyzeInstRetVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { const b = try mod.requireFunctionBlock(scope, inst.base.src); - if (b.func) |func| { - // Need to emit a compile error if returning void is not allowed. - const void_inst = try mod.constVoid(scope, inst.base.src); - const fn_ty = func.owner_decl.typed_value.most_recent.typed_value.ty; - const casted_void = try mod.coerce(scope, fn_ty.fnReturnType(), void_inst); - if (casted_void.ty.zigTypeTag() != .Void) { - return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, casted_void); - } + switch (b.label) { + .inlining => |*inlining| { + // We are inlining a function call; rewrite the `retvoid` as a `breakvoid`. + const void_inst = try mod.constVoid(scope, inst.base.src); + try inlining.merges.results.append(mod.gpa, void_inst); + return mod.addBr(b, inst.base.src, inlining.merges.block_inst, void_inst); + }, + .none, .breaking => { + if (b.func) |func| { + // Need to emit a compile error if returning void is not allowed. 
+ const void_inst = try mod.constVoid(scope, inst.base.src); + const fn_ty = func.owner_decl.typed_value.most_recent.typed_value.ty; + const casted_void = try mod.coerce(scope, fn_ty.fnReturnType(), void_inst); + if (casted_void.ty.zigTypeTag() != .Void) { + return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, casted_void); + } + } + return mod.addNoOp(b, inst.base.src, Type.initTag(.noreturn), .retvoid); + }, } - return mod.addNoOp(b, inst.base.src, Type.initTag(.noreturn), .retvoid); } fn floatOpAllowed(tag: zir.Inst.Tag) bool { @@ -2054,12 +2137,16 @@ fn analyzeBreak( ) InnerError!*Inst { var opt_block = scope.cast(Scope.Block); while (opt_block) |block| { - if (block.label) |*label| { - if (label.zir_block == zir_block) { - try label.results.append(mod.gpa, operand); - const b = try mod.requireRuntimeBlock(scope, src); - return mod.addBr(b, src, label.block_inst, operand); - } + switch (block.label) { + .none => {}, + .breaking => |*label| { + if (label.zir_block == zir_block) { + try label.merges.results.append(mod.gpa, operand); + const b = try mod.requireFunctionBlock(scope, src); + return mod.addBr(b, src, label.merges.block_inst, operand); + } + }, + .inlining => unreachable, // Invalid `break` ZIR inside inline function call. } opt_block = block.parent; } else unreachable; diff --git a/test/stage2/zir.zig b/test/stage2/zir.zig index da4038e792..c29e636cd4 100644 --- a/test/stage2/zir.zig +++ b/test/stage2/zir.zig @@ -30,7 +30,7 @@ pub fn addCases(ctx: *TestContext) !void { \\@unnamed$7 = fntype([], @void, cc=C) \\@entry = fn(@unnamed$7, { \\ %0 = returnvoid() ; deaths=0b1000000000000000 - \\}) + \\}, is_inline=0) \\ ); ctx.transformZIR("elemptr, add, cmp, condbr, return, breakpoint", linux_x64, @@ -78,7 +78,7 @@ pub fn addCases(ctx: *TestContext) !void { \\@unnamed$6 = fntype([], @void, cc=C) \\@entry = fn(@unnamed$6, { \\ %0 = returnvoid() ; deaths=0b1000000000000000 - \\}) + \\}, is_inline=0) \\@entry__anon_1 = str("2\x08\x01\n") \\@9 = declref("9__anon_0") \\@9__anon_0 = str("entry") @@ -123,17 +123,17 @@ pub fn addCases(ctx: *TestContext) !void { \\@entry = fn(@unnamed$7, { \\ %0 = call(@a, [], modifier=auto) ; deaths=0b1000000000000001 \\ %1 = returnvoid() ; deaths=0b1000000000000000 - \\}) + \\}, is_inline=0) \\@unnamed$9 = fntype([], @void, cc=C) \\@a = fn(@unnamed$9, { \\ %0 = call(@b, [], modifier=auto) ; deaths=0b1000000000000001 \\ %1 = returnvoid() ; deaths=0b1000000000000000 - \\}) + \\}, is_inline=0) \\@unnamed$11 = fntype([], @void, cc=C) \\@b = fn(@unnamed$11, { \\ %0 = call(@a, [], modifier=auto) ; deaths=0b1000000000000001 \\ %1 = returnvoid() ; deaths=0b1000000000000000 - \\}) + \\}, is_inline=0) \\ ); // Now we introduce a compile error @@ -203,7 +203,7 @@ pub fn addCases(ctx: *TestContext) !void { \\@unnamed$7 = fntype([], @void, cc=C) \\@entry = fn(@unnamed$7, { \\ %0 = returnvoid() ; deaths=0b1000000000000000 - \\}) + \\}, is_inline=0) \\ ); } -- cgit v1.2.3 From 006e7f68056af62ae7713d7ef228841d11874735 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 2 Jan 2021 13:40:23 -0700 Subject: stage2: re-use ZIR for comptime and inline calls Instead of freeing ZIR after semantic analysis, we keep it around so that it can be used for comptime calls, inline calls, and generic function calls. ZIR memory is now managed by the Decl arena. Debug dump() functions are conditionally compiled; only available in Debug builds of the compiler. Add a test for an inline function call. 
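As an illustrative sketch of the two call forms this enables (hypothetical user code, separate from the test added in test/stage2/test.zig below):

    fn add(a: usize, b: usize) usize {
        return a + b; // ZIR is retained after analysis, so comptime calls can re-run it
    }

    inline fn addInline(a: usize, b: usize) usize {
        return a + b; // state == .inline_only: ZIR only, no runtime code is generated
    }

    comptime {
        // comptime call of a runtime function: interprets add's stored ZIR
        if (add(1, 2) != 3) @compileError("bad comptime call");
    }

    export fn caller(x: usize) usize {
        // inline call: addInline's ZIR is analyzed directly into the caller's block
        return addInline(x, 2);
    }
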
--- src/Compilation.zig | 9 ++-- src/Module.zig | 136 +++++++++++++++------------------------------------ src/codegen.zig | 10 ++-- src/codegen/c.zig | 2 +- src/codegen/wasm.zig | 2 +- src/llvm_backend.zig | 2 +- src/zir.zig | 17 ++++--- src/zir_sema.zig | 99 ++----------------------------------- test/stage2/test.zig | 25 ++++++++++ 9 files changed, 93 insertions(+), 209 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index a6f39a3154..9a06aee561 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1459,15 +1459,16 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; if (decl.typed_value.most_recent.typed_value.val.castTag(.function)) |payload| { const func = payload.data; - switch (func.bits.state) { + switch (func.state) { .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { - assert(func.bits.state != .in_progress); + assert(func.state != .in_progress); continue; }, error.OutOfMemory => return error.OutOfMemory, }, .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this .sema_failure, .dependency_failure => continue, .success => {}, } @@ -1476,9 +1477,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor var decl_arena = decl.typed_value.most_recent.arena.?.promote(module.gpa); defer decl.typed_value.most_recent.arena.?.* = decl_arena.state; log.debug("analyze liveness of {s}\n", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, func.data.body); + try liveness.analyze(module.gpa, &decl_arena.allocator, func.body); - if (self.verbose_ir) { + if (std.builtin.mode == .Debug and self.verbose_ir) { func.dump(module.*); } } diff --git a/src/Module.zig b/src/Module.zig index db76ecd5db..be6ca0df63 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -286,75 +286,29 @@ pub const Decl = struct { /// Extern functions do not have this data structure; they are represented by /// the `Decl` only, with a `Value` tag of `extern_fn`. pub const Fn = struct { - bits: packed struct { - /// Get and set this field via `analysis` and `setAnalysis`. - state: Analysis.Tag, - /// We carry this state into `Fn` instead of leaving it in the AST so that - /// analysis of function calls can happen even on functions whose AST has - /// been unloaded from memory. - is_inline: bool, - unused_bits: u4 = 0, - }, - /// Get and set this data via `analysis` and `setAnalysis`. - data: union { - none: void, - zir: *ZIR, - body: Body, - }, owner_decl: *Decl, - - pub const Analysis = union(Tag) { - queued: *ZIR, + /// Contains un-analyzed ZIR instructions generated from Zig source AST. + /// Even after we finish analysis, the ZIR is kept in memory, so that + /// comptime and inline function calls can happen. + zir: zir.Module.Body, + /// undefined unless analysis state is `success`. + body: Body, + state: Analysis, + + pub const Analysis = enum { + queued, + /// This function intentionally only has ZIR generated because it is marked + /// inline, which means no runtime version of the function will be generated. + inline_only, in_progress, + /// There will be a corresponding ErrorMsg in Module.failed_decls sema_failure, + /// This Fn might be OK but it depends on another Decl which did not + /// successfully complete semantic analysis. 
dependency_failure, - success: Body, - - pub const Tag = enum(u3) { - queued, - in_progress, - /// There will be a corresponding ErrorMsg in Module.failed_decls - sema_failure, - /// This Fn might be OK but it depends on another Decl which did not - /// successfully complete semantic analysis. - dependency_failure, - success, - }; + success, }; - /// Contains un-analyzed ZIR instructions generated from Zig source AST. - pub const ZIR = struct { - body: zir.Module.Body, - arena: std.heap.ArenaAllocator.State, - }; - - pub fn analysis(self: Fn) Analysis { - return switch (self.bits.state) { - .queued => .{ .queued = self.data.zir }, - .success => .{ .success = self.data.body }, - .in_progress => .in_progress, - .sema_failure => .sema_failure, - .dependency_failure => .dependency_failure, - }; - } - - pub fn setAnalysis(self: *Fn, anal: Analysis) void { - switch (anal) { - .queued => |zir_ptr| { - self.bits.state = .queued; - self.data = .{ .zir = zir_ptr }; - }, - .success => |body| { - self.bits.state = .success; - self.data = .{ .body = body }; - }, - .in_progress, .sema_failure, .dependency_failure => { - self.bits.state = anal; - self.data = .{ .none = {} }; - }, - } - } - /// For debugging purposes. pub fn dump(self: *Fn, mod: Module) void { zir.dumpFn(mod, self); @@ -1124,7 +1078,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .param_types = param_types, }, .{}); - if (self.comp.verbose_ir) { + if (std.builtin.mode == .Debug and self.comp.verbose_ir) { zir.dumpZir(self.gpa, "fn_type", decl.name, fn_type_scope.instructions.items) catch {}; } @@ -1175,14 +1129,11 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const new_func = try decl_arena.allocator.create(Fn); const fn_payload = try decl_arena.allocator.create(Value.Payload.Function); - const fn_zir = blk: { - // This scope's arena memory is discarded after the ZIR generation - // pass completes, and semantic analysis of it completes. - var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa); - errdefer gen_scope_arena.deinit(); + const fn_zir: zir.Module.Body = blk: { + // We put the ZIR inside the Decl arena. 
var gen_scope: Scope.GenZIR = .{ .decl = decl, - .arena = &gen_scope_arena.allocator, + .arena = &decl_arena.allocator, .parent = decl.scope, }; defer gen_scope.instructions.deinit(self.gpa); @@ -1194,7 +1145,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const name_token = param.name_token.?; const src = tree.token_locs[name_token].start; const param_name = try self.identifierTokenString(&gen_scope.base, name_token); - const arg = try gen_scope_arena.allocator.create(zir.Inst.Arg); + const arg = try decl_arena.allocator.create(zir.Inst.Arg); arg.* = .{ .base = .{ .tag = .arg, @@ -1206,7 +1157,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .kw_args = .{}, }; gen_scope.instructions.items[i] = &arg.base; - const sub_scope = try gen_scope_arena.allocator.create(Scope.LocalVal); + const sub_scope = try decl_arena.allocator.create(Scope.LocalVal); sub_scope.* = .{ .parent = params_scope, .gen_zir = &gen_scope, @@ -1227,18 +1178,13 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid); } - if (self.comp.verbose_ir) { + if (std.builtin.mode == .Debug and self.comp.verbose_ir) { zir.dumpZir(self.gpa, "fn_body", decl.name, gen_scope.instructions.items) catch {}; } - const fn_zir = try gen_scope_arena.allocator.create(Fn.ZIR); - fn_zir.* = .{ - .body = .{ - .instructions = try gen_scope.arena.dupe(*zir.Inst, gen_scope.instructions.items), - }, - .arena = gen_scope_arena.state, + break :blk .{ + .instructions = try gen_scope.arena.dupe(*zir.Inst, gen_scope.instructions.items), }; - break :blk fn_zir; }; const is_inline = blk: { @@ -1249,13 +1195,12 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { } break :blk false; }; + const anal_state = ([2]Fn.Analysis{ .queued, .inline_only })[@boolToInt(is_inline)]; new_func.* = .{ - .bits = .{ - .state = .queued, - .is_inline = is_inline, - }, - .data = .{ .zir = fn_zir }, + .state = anal_state, + .zir = fn_zir, + .body = undefined, .owner_decl = decl, }; fn_payload.* = .{ @@ -1272,7 +1217,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { type_changed = !tvm.typed_value.ty.eql(fn_type); if (tvm.typed_value.val.castTag(.function)) |payload| { const prev_func = payload.data; - prev_is_inline = prev_func.bits.is_inline; + prev_is_inline = prev_func.state == .inline_only; } tvm.deinit(self.gpa); @@ -1391,7 +1336,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const src = tree.token_locs[init_node.firstToken()].start; const init_inst = try astgen.expr(self, &gen_scope.base, init_result_loc, init_node); - if (self.comp.verbose_ir) { + if (std.builtin.mode == .Debug and self.comp.verbose_ir) { zir.dumpZir(self.gpa, "var_init", decl.name, gen_scope.instructions.items) catch {}; } @@ -1435,7 +1380,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .val = Value.initTag(.type_type), }); const var_type = try astgen.expr(self, &type_scope.base, .{ .ty = type_type }, type_node); - if (self.comp.verbose_ir) { + if (std.builtin.mode == .Debug and self.comp.verbose_ir) { zir.dumpZir(self.gpa, "var_type", decl.name, type_scope.instructions.items) catch {}; } @@ -1511,7 +1456,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { defer gen_scope.instructions.deinit(self.gpa); _ = try astgen.comptimeExpr(self, &gen_scope.base, .none, comptime_decl.expr); - if (self.comp.verbose_ir) { + if (std.builtin.mode == .Debug and self.comp.verbose_ir) { zir.dumpZir(self.gpa, "comptime_block", decl.name, 
gen_scope.instructions.items) catch {}; } @@ -1902,15 +1847,14 @@ pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void { }; defer inner_block.instructions.deinit(self.gpa); - const fn_zir = func.data.zir; - defer fn_zir.arena.promote(self.gpa).deinit(); - func.setAnalysis(.in_progress); + func.state = .in_progress; log.debug("set {s} to in_progress\n", .{decl.name}); - try zir_sema.analyzeBody(self, &inner_block.base, fn_zir.body); + try zir_sema.analyzeBody(self, &inner_block.base, func.zir); const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items); - func.setAnalysis(.{ .success = .{ .instructions = instructions } }); + func.state = .success; + func.body = .{ .instructions = instructions }; log.debug("set {s} to success\n", .{decl.name}); } @@ -2407,7 +2351,7 @@ pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) Inn self.ensureDeclAnalyzed(decl) catch |err| { if (scope.cast(Scope.Block)) |block| { if (block.func) |func| { - func.setAnalysis(.dependency_failure); + func.state = .dependency_failure; } else { block.decl.analysis = .dependency_failure; } @@ -3107,7 +3051,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Com .block => { const block = scope.cast(Scope.Block).?; if (block.func) |func| { - func.setAnalysis(.sema_failure); + func.state = .sema_failure; } else { block.decl.analysis = .sema_failure; block.decl.generation = self.generation; diff --git a/src/codegen.zig b/src/codegen.zig index 588c3dec4c..58be73a31c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -532,7 +532,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.data.body); + try self.genBody(self.mod_fn.body); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -576,7 +576,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.data.body); + try self.genBody(self.mod_fn.body); try self.dbgSetEpilogueBegin(); } }, @@ -593,7 +593,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.data.body); + try self.genBody(self.mod_fn.body); // Backpatch stack offset const stack_end = self.max_end_stack; @@ -638,13 +638,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.data.body); + try self.genBody(self.mod_fn.body); try self.dbgSetEpilogueBegin(); } }, else => { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.data.body); + try self.genBody(self.mod_fn.body); try self.dbgSetEpilogueBegin(); }, } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 712d663af0..1a89e22d48 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -275,7 +275,7 @@ pub fn generate(file: *C, module: *Module, decl: *Decl) !void { try writer.writeAll(" {"); const func: *Module.Fn = func_payload.data; - const instructions = func.data.body.instructions; + const instructions = func.body.instructions; if (instructions.len > 0) { try writer.writeAll("\n"); for (instructions) |inst| { diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 1eb4f5bc29..036243dcca 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -63,7 +63,7 @@ pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void { // TODO: 
check for and handle death of instructions const tv = decl.typed_value.most_recent.typed_value; const mod_fn = tv.val.castTag(.function).?.data; - for (mod_fn.data.body.instructions) |inst| try genInst(buf, decl, inst); + for (mod_fn.body.instructions) |inst| try genInst(buf, decl, inst); // Write 'end' opcode try writer.writeByte(0x0B); diff --git a/src/llvm_backend.zig b/src/llvm_backend.zig index 5814aa7e7e..97406797b6 100644 --- a/src/llvm_backend.zig +++ b/src/llvm_backend.zig @@ -294,7 +294,7 @@ pub const LLVMIRModule = struct { const entry_block = llvm_func.appendBasicBlock("Entry"); self.builder.positionBuilderAtEnd(entry_block); - const instructions = func.data.body.instructions; + const instructions = func.body.instructions; for (instructions) |inst| { switch (inst.tag) { .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?), diff --git a/src/zir.zig b/src/zir.zig index 64b74f24d9..56ddee919c 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -1864,13 +1864,15 @@ pub fn dumpFn(old_module: IrModule, module_fn: *IrModule.Fn) void { defer ctx.const_table.deinit(); defer ctx.arena.deinit(); - switch (module_fn.analysis()) { + switch (module_fn.state) { .queued => std.debug.print("(queued)", .{}), + .inline_only => std.debug.print("(inline_only)", .{}), .in_progress => std.debug.print("(in_progress)", .{}), .sema_failure => std.debug.print("(sema_failure)", .{}), .dependency_failure => std.debug.print("(dependency_failure)", .{}), - .success => |body| { - ctx.dump(body, std.io.getStdErr().writer()) catch @panic("failed to dump TZIR"); + .success => { + const writer = std.io.getStdErr().writer(); + ctx.dump(module_fn.body, writer) catch @panic("failed to dump TZIR"); }, } } @@ -2289,11 +2291,12 @@ const EmitZIR = struct { var instructions = std.ArrayList(*Inst).init(self.allocator); defer instructions.deinit(); - switch (module_fn.analysis()) { + switch (module_fn.state) { .queued => unreachable, .in_progress => unreachable, - .success => |body| { - try self.emitBody(body, &inst_table, &instructions); + .inline_only => unreachable, + .success => { + try self.emitBody(module_fn.body, &inst_table, &instructions); }, .sema_failure => { const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?; @@ -2372,7 +2375,7 @@ const EmitZIR = struct { .body = .{ .instructions = arena_instrs }, }, .kw_args = .{ - .is_inline = module_fn.bits.is_inline, + .is_inline = module_fn.state == .inline_only, }, }; return self.emitUnnamedDecl(&fn_inst.base); diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 9365996bb6..e8d995dd5e 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -25,8 +25,6 @@ const trace = @import("tracy.zig").trace; const Scope = Module.Scope; const InnerError = Module.InnerError; const Decl = Module.Decl; -const astgen = @import("astgen.zig"); -const ast = std.zig.ast; pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst { switch (old_inst.tag) { @@ -861,7 +859,7 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError .function => func_val.castTag(.function).?.data, else => break :blk false, }; - break :blk module_fn.bits.is_inline; + break :blk module_fn.state == .inline_only; } break :blk false; }; @@ -874,76 +872,6 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError }), else => unreachable, }; - const callee_decl = module_fn.owner_decl; - // TODO: De-duplicate this with the code in Module.zig that generates - // ZIR for the same function and re-use the same ZIR for 
runtime function - // generation and for inline/comptime calls. - const callee_file_scope = callee_decl.getFileScope(); - const tree = mod.getAstTree(callee_file_scope) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => return error.AnalysisFail, - // TODO: make sure this gets retried and not cached - else => return mod.fail(scope, inst.base.src, "failed to load {s}: {s}", .{ - callee_file_scope.sub_file_path, @errorName(err), - }), - }; - const ast_node = tree.root_node.decls()[callee_decl.src_index]; - const fn_proto = ast_node.castTag(.FnProto).?; - - var call_arena = std.heap.ArenaAllocator.init(mod.gpa); - defer call_arena.deinit(); - - var gen_scope: Scope.GenZIR = .{ - .decl = callee_decl, - .arena = &call_arena.allocator, - .parent = callee_decl.scope, - }; - defer gen_scope.instructions.deinit(mod.gpa); - - // We need an instruction for each parameter, and they must be first in the body. - try gen_scope.instructions.resize(mod.gpa, fn_proto.params_len); - var params_scope = &gen_scope.base; - for (fn_proto.params()) |param, i| { - const name_token = param.name_token.?; - const src = tree.token_locs[name_token].start; - const param_name = try mod.identifierTokenString(scope, name_token); - const arg = try call_arena.allocator.create(zir.Inst.Arg); - arg.* = .{ - .base = .{ - .tag = .arg, - .src = src, - }, - .positionals = .{ - .name = param_name, - }, - .kw_args = .{}, - }; - gen_scope.instructions.items[i] = &arg.base; - const sub_scope = try call_arena.allocator.create(Scope.LocalVal); - sub_scope.* = .{ - .parent = params_scope, - .gen_zir = &gen_scope, - .name = param_name, - .inst = &arg.base, - }; - params_scope = &sub_scope.base; - } - - const body_node = fn_proto.getBodyNode().?; // We handle extern functions above. - const body_block = body_node.cast(ast.Node.Block).?; - - try astgen.blockExpr(mod, params_scope, body_block); - - if (gen_scope.instructions.items.len == 0 or - !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn()) - { - const src = tree.token_locs[body_block.rbrace].start; - _ = try astgen.addZIRNoOp(mod, &gen_scope.base, src, .returnvoid); - } - - if (mod.comp.verbose_ir) { - zir.dumpZir(mod.gpa, "fn_body_callee", callee_decl.name, gen_scope.instructions.items) catch {}; - } // Analyze the ZIR. The same ZIR gets analyzed into a runtime function // or an inlined call depending on what union tag the `label` field is @@ -986,9 +914,7 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError // This will have return instructions analyzed as break instructions to // the block_inst above. 
- try analyzeBody(mod, &child_block.base, .{ - .instructions = gen_scope.instructions.items, - }); + try analyzeBody(mod, &child_block.base, module_fn.zir); return analyzeBlockBody(mod, scope, &child_block, merges); } @@ -998,26 +924,11 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError fn analyzeInstFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst { const fn_type = try resolveType(mod, scope, fn_inst.positionals.fn_type); - const fn_zir = blk: { - var fn_arena = std.heap.ArenaAllocator.init(mod.gpa); - errdefer fn_arena.deinit(); - - const fn_zir = try scope.arena().create(Module.Fn.ZIR); - fn_zir.* = .{ - .body = .{ - .instructions = fn_inst.positionals.body.instructions, - }, - .arena = fn_arena.state, - }; - break :blk fn_zir; - }; const new_func = try scope.arena().create(Module.Fn); new_func.* = .{ - .bits = .{ - .state = .queued, - .is_inline = fn_inst.kw_args.is_inline, - }, - .data = .{ .zir = fn_zir }, + .state = if (fn_inst.kw_args.is_inline) .inline_only else .queued, + .zir = fn_inst.positionals.body, + .body = undefined, .owner_decl = scope.decl().?, }; return mod.constInst(scope, fn_inst.base.src, .{ diff --git a/test/stage2/test.zig b/test/stage2/test.zig index 18c54b367e..79f5c3a73e 100644 --- a/test/stage2/test.zig +++ b/test/stage2/test.zig @@ -342,6 +342,7 @@ pub fn addCases(ctx: *TestContext) !void { , "", ); + // comptime function call case.addCompareOutput( \\export fn _start() noreturn { \\ exit(); @@ -365,6 +366,30 @@ pub fn addCases(ctx: *TestContext) !void { , "", ); + // Inline function call + case.addCompareOutput( + \\export fn _start() noreturn { + \\ var x: usize = 3; + \\ const y = add(1, 2, x); + \\ exit(y - 6); + \\} + \\ + \\inline fn add(a: usize, b: usize, c: usize) usize { + \\ return a + b + c; + \\} + \\ + \\fn exit(code: usize) noreturn { + \\ asm volatile ("syscall" + \\ : + \\ : [number] "{rax}" (231), + \\ [arg1] "{rdi}" (code) + \\ : "rcx", "r11", "memory" + \\ ); + \\ unreachable; + \\} + , + "", + ); } { -- cgit v1.2.3 From 2a410baa2b82528cf2bf8bcbfe0ce030ce972cec Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 3 Jan 2021 23:01:22 +0100 Subject: stage2: implement basic function params aarch64 Implement missing `.register` prong for `aarch64` `genSetReg`. --- src/codegen.zig | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 58be73a31c..cd6ea15737 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2837,7 +2837,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 48), 48).toU32()); } }, - .register => return self.fail(src, "TODO implement genSetReg for aarch64 {}", .{mcv}), + .register => |src_reg| { + // If the registers are the same, nothing to do. + if (src_reg.id() == reg.id()) + return; + + // mov reg, src_reg + writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr( + reg, + .xzr, + src_reg, + Instruction.Shift.none, + ).toU32()); + }, .memory => |addr| { if (self.bin_file.options.pie) { // For MachO, the binary, with the exception of object files, has to be a PIE. 
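A note on the `.register` prong above: AArch64 has no dedicated register-to-register move encoding; `mov xD, xS` is an assembler alias for `orr xD, xzr, xS` (bitwise OR with the zero register), which is why the prong emits `orr`. A minimal hypothetical helper capturing the same pattern, using the `Instruction` calls exactly as the prong does:

    // Emit `mov dst, src` by encoding `orr dst, xzr, src`
    // (MOV (register) is an alias of ORR (shifted register) on AArch64).
    fn emitMovRegReg(self: *Self, dst: Register, src: Register) !void {
        if (dst.id() == src.id()) return; // moving a register onto itself is a no-op
        writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(
            dst,
            .xzr,
            src,
            Instruction.Shift.none,
        ).toU32());
    }
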
@@ -3475,6 +3487,59 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else => return self.fail(src, "TODO implement function parameters for {} on arm", .{cc}), } }, + .aarch64 => { + switch (cc) { + .Naked => { + assert(result.args.len == 0); + result.return_value = .{ .unreach = {} }; + result.stack_byte_count = 0; + result.stack_align = 1; + return result; + }, + .Unspecified, .C => { + // ARM64 Procedure Call Standard + var ncrn: usize = 0; // Next Core Register Number + var nsaa: u32 = 0; // Next stacked argument address + + for (param_types) |ty, i| { + // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned + // values to spread across odd-numbered registers. + if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) { + // Round up NCRN to the next even number + ncrn += ncrn % 2; + } + + const param_size = @intCast(u32, ty.abiSize(self.target.*)); + if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) { + if (param_size <= 8) { + result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; + ncrn += 1; + } else { + return self.fail(src, "TODO MCValues with multiple registers", .{}); + } + } else if (ncrn < 8 and nsaa == 0) { + return self.fail(src, "TODO MCValues split between registers and stack", .{}); + } else { + ncrn = 8; + // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided + // that the entire stack space consumed by the arguments is 8-byte aligned. + if (ty.abiAlignment(self.target.*) == 8) { + if (nsaa % 8 != 0) { + nsaa += 8 - (nsaa % 8); + } + } + + result.args[i] = .{ .stack_offset = nsaa }; + nsaa += param_size; + } + } + + result.stack_byte_count = nsaa; + result.stack_align = 16; + }, + else => return self.fail(src, "TODO implement function parameters for {} on aarch64", .{cc}), + } + }, else => if (param_types.len != 0) return self.fail(src, "TODO implement codegen parameters for {}", .{self.target.cpu.arch}), } -- cgit v1.2.3 From aa0906e9aaaf36bc928b5502bdb34e7a0409b2c0 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 2 Jan 2021 18:53:11 +0100 Subject: stage2 x86_64: fix bug in Function.gen Previously, the x86_64 backend would remove code for exitlude relocs if the jump amount were 0. This causes issues as earlier jumps rely on the jump being present at the same address. --- src/codegen.zig | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index cd6ea15737..c2537a1ca0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -543,13 +543,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (self.code.items.len >= math.maxInt(i32)) { return self.fail(self.src, "unable to perform relocation: jump too far", .{}); } - for (self.exitlude_jump_relocs.items) |jmp_reloc| { + if (self.exitlude_jump_relocs.items.len == 1) { + self.code.items.len -= 5; + } else for (self.exitlude_jump_relocs.items) |jmp_reloc| { const amt = self.code.items.len - (jmp_reloc + 4); - // If it wouldn't jump at all, elide it. 
-                if (amt == 0) {
-                    self.code.items.len -= 5;
-                    continue;
-                }
                 const s32_amt = @intCast(i32, amt);
                 mem.writeIntLittle(i32, self.code.items[jmp_reloc..][0..4], s32_amt);
             }
-- 
cgit v1.2.3


From d92ea56884c4cdc3a0cff8b6ed1e31f959ee0fa8 Mon Sep 17 00:00:00 2001
From: Alex Cameron
Date: Sat, 26 Dec 2020 15:30:19 +1100
Subject: std: Support equivalent ArrayList operations in ArrayHashMap

---
 lib/std/array_hash_map.zig | 365 +++++++++++++++++++++++++++++++++++++++++----
 src/Module.zig             |  16 +-
 src/codegen.zig            |   2 +-
 3 files changed, 341 insertions(+), 42 deletions(-)

(limited to 'src/codegen.zig')

diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index ddc15666bb..b6478d4094 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -99,6 +99,16 @@ pub fn ArrayHashMap(
             };
         }
 
+        /// `ArrayHashMap` takes ownership of the passed in array list. The array list must have
+        /// been allocated with `allocator`.
+        /// Deinitialize with `deinit`.
+        pub fn fromOwnedArrayList(allocator: *Allocator, entries: std.ArrayListUnmanaged(Entry)) !Self {
+            return Self{
+                .unmanaged = try Unmanaged.fromOwnedArrayList(allocator, entries),
+                .allocator = allocator,
+            };
+        }
+
         pub fn deinit(self: *Self) void {
             self.unmanaged.deinit(self.allocator);
             self.* = undefined;
         }
@@ -214,9 +224,19 @@ pub fn ArrayHashMap(
         }
 
         /// If there is an `Entry` with a matching key, it is deleted from
-        /// the hash map, and then returned from this function.
-        pub fn remove(self: *Self, key: K) ?Entry {
-            return self.unmanaged.remove(key);
+        /// the hash map, and then returned from this function. The entry is
+        /// removed from the underlying array by swapping it with the last
+        /// element.
+        pub fn swapRemove(self: *Self, key: K) ?Entry {
+            return self.unmanaged.swapRemove(key);
+        }
+
+        /// If there is an `Entry` with a matching key, it is deleted from
+        /// the hash map, and then returned from this function. The entry is
+        /// removed from the underlying array by shifting all elements forward
+        /// thereby maintaining the current ordering.
+        pub fn orderedRemove(self: *Self, key: K) ?Entry {
+            return self.unmanaged.orderedRemove(key);
         }
 
         /// Asserts there is an `Entry` with matching key, deletes it from the hash map,
@@ -233,6 +253,29 @@ pub fn ArrayHashMap(
             var other = try self.unmanaged.clone(self.allocator);
             return other.promote(self.allocator);
         }
+
+        /// Rebuilds the key indexes. If the underlying entries have been modified directly, users
+        /// can call `reIndex` to update the indexes to account for these new entries.
+        pub fn reIndex(self: *Self) !void {
+            return self.unmanaged.reIndex(self.allocator);
+        }
+
+        /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
+        /// index entries. Keeps capacity the same.
+        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+            return self.unmanaged.shrinkRetainingCapacity(new_len);
+        }
+
+        /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
+        /// index entries. Reduces allocated capacity.
+        pub fn shrinkAndFree(self: *Self, new_len: usize) void {
+            return self.unmanaged.shrinkAndFree(self.allocator, new_len);
+        }
+
+        /// Removes the last inserted `Entry` in the hash map and returns it.
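+        /// Asserts that the map is not empty.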
+ pub fn pop(self: *Self) Entry { + return self.unmanaged.pop(); + } }; } @@ -286,6 +329,7 @@ pub fn ArrayHashMapUnmanaged( pub const GetOrPutResult = struct { entry: *Entry, found_existing: bool, + index: usize, }; pub const Managed = ArrayHashMap(K, V, hash, eql, store_hash); @@ -294,6 +338,12 @@ pub fn ArrayHashMapUnmanaged( const linear_scan_max = 8; + const RemovalType = enum { + swap, + ordered, + index_only, + }; + pub fn promote(self: Self, allocator: *Allocator) Managed { return .{ .unmanaged = self, @@ -301,6 +351,15 @@ pub fn ArrayHashMapUnmanaged( }; } + /// `ArrayHashMapUnmanaged` takes ownership of the passed in array list. The array list must + /// have been allocated with `allocator`. + /// Deinitialize with `deinit`. + pub fn fromOwnedArrayList(allocator: *Allocator, entries: std.ArrayListUnmanaged(Entry)) !Self { + var array_hash_map = Self{ .entries = entries }; + try array_hash_map.reIndex(allocator); + return array_hash_map; + } + pub fn deinit(self: *Self, allocator: *Allocator) void { self.entries.deinit(allocator); if (self.index_header) |header| { @@ -343,9 +402,11 @@ pub fn ArrayHashMapUnmanaged( pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { self.ensureCapacity(allocator, self.entries.items.len + 1) catch |err| { // "If key exists this function cannot fail." + const index = self.getIndex(key) orelse return err; return GetOrPutResult{ - .entry = self.getEntry(key) orelse return err, + .entry = &self.entries.items[index], .found_existing = true, + .index = index, }; }; return self.getOrPutAssumeCapacity(key); @@ -362,11 +423,12 @@ pub fn ArrayHashMapUnmanaged( const header = self.index_header orelse { // Linear scan. const h = if (store_hash) hash(key) else {}; - for (self.entries.items) |*item| { + for (self.entries.items) |*item, i| { if (item.hash == h and eql(key, item.key)) { return GetOrPutResult{ .entry = item, .found_existing = true, + .index = i, }; } } @@ -379,6 +441,7 @@ pub fn ArrayHashMapUnmanaged( return GetOrPutResult{ .entry = new_entry, .found_existing = false, + .index = self.entries.items.len - 1, }; }; @@ -524,30 +587,25 @@ pub fn ArrayHashMapUnmanaged( } /// If there is an `Entry` with a matching key, it is deleted from - /// the hash map, and then returned from this function. - pub fn remove(self: *Self, key: K) ?Entry { - const header = self.index_header orelse { - // Linear scan. - const h = if (store_hash) hash(key) else {}; - for (self.entries.items) |item, i| { - if (item.hash == h and eql(key, item.key)) { - return self.entries.swapRemove(i); - } - } - return null; - }; - switch (header.capacityIndexType()) { - .u8 => return self.removeInternal(key, header, u8), - .u16 => return self.removeInternal(key, header, u16), - .u32 => return self.removeInternal(key, header, u32), - .usize => return self.removeInternal(key, header, usize), - } + /// the hash map, and then returned from this function. The entry is + /// removed from the underlying array by swapping it with the last + /// element. + pub fn swapRemove(self: *Self, key: K) ?Entry { + return self.removeInternal(key, .swap); + } + + /// If there is an `Entry` with a matching key, it is deleted from + /// the hash map, and then returned from this function. The entry is + /// removed from the underlying array by shifting all elements forward + /// thereby maintaining the current ordering. 
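+        /// This is an O(N) operation.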
+        pub fn orderedRemove(self: *Self, key: K) ?Entry {
+            return self.removeInternal(key, .ordered);
         }
 
         /// Asserts there is an `Entry` with matching key, deletes it from the hash map,
         /// and discards it.
         pub fn removeAssertDiscard(self: *Self, key: K) void {
-            assert(self.remove(key) != null);
+            assert(self.swapRemove(key) != null);
         }
 
         pub fn items(self: Self) []Entry {
@@ -566,9 +624,85 @@ pub fn ArrayHashMapUnmanaged(
             return other;
         }
 
-        fn removeInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) ?Entry {
+        /// Rebuilds the key indexes. If the underlying entries have been modified directly, users
+        /// can call `reIndex` to update the indexes to account for these new entries.
+        pub fn reIndex(self: *Self, allocator: *Allocator) !void {
+            if (self.entries.capacity <= linear_scan_max) return;
+            // We're going to rebuild the index header and replace the existing one (if any). The
+            // indexes should be sized such that they will be at most 60% full.
+            const needed_len = self.entries.capacity * 5 / 3;
+            const new_indexes_len = math.ceilPowerOfTwo(usize, needed_len) catch unreachable;
+            const new_header = try IndexHeader.alloc(allocator, new_indexes_len);
+            self.insertAllEntriesIntoNewHeader(new_header);
+            if (self.index_header) |header|
+                header.free(allocator);
+            self.index_header = new_header;
+        }
+
+        /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
+        /// index entries. Keeps capacity the same.
+        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+            // Remove index entries from the new length onwards.
+            // Explicitly choose to ONLY remove index entries and not the underlying array list
+            // entries as we're going to remove them in the subsequent shrink call.
+            var i: usize = new_len;
+            while (i < self.entries.items.len) : (i += 1)
+                _ = self.removeWithHash(self.entries.items[i].key, self.entries.items[i].hash, .index_only);
+            self.entries.shrinkRetainingCapacity(new_len);
+        }
+
+        /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
+        /// index entries. Reduces allocated capacity.
+        pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+            // Remove index entries from the new length onwards.
+            // Explicitly choose to ONLY remove index entries and not the underlying array list
+            // entries as we're going to remove them in the subsequent shrink call.
+            var i: usize = new_len;
+            while (i < self.entries.items.len) : (i += 1)
+                _ = self.removeWithHash(self.entries.items[i].key, self.entries.items[i].hash, .index_only);
+            self.entries.shrinkAndFree(allocator, new_len);
+        }
+
+        /// Removes the last inserted `Entry` in the hash map and returns it.
+        pub fn pop(self: *Self) Entry {
+            const top = self.entries.pop();
+            _ = self.removeWithHash(top.key, top.hash, .index_only);
+            return top;
+        }
+
+        fn removeInternal(self: *Self, key: K, comptime removal_type: RemovalType) ?Entry {
+            const key_hash = if (store_hash) hash(key) else {};
+            return self.removeWithHash(key, key_hash, removal_type);
+        }
+
+        fn removeWithHash(self: *Self, key: K, key_hash: Hash, comptime removal_type: RemovalType) ?Entry {
+            const header = self.index_header orelse {
+                // If we're only removing index entries and we have no index header, there's no need
+                // to continue.
+                if (removal_type == .index_only) return null;
+                // Linear scan.
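+                // (Without an index header the map has at most linear_scan_max entries, so this is cheap.)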
+ for (self.entries.items) |item, i| { + if (item.hash == key_hash and eql(key, item.key)) { + switch (removal_type) { + .swap => return self.entries.swapRemove(i), + .ordered => return self.entries.orderedRemove(i), + .index_only => unreachable, + } + } + } + return null; + }; + switch (header.capacityIndexType()) { + .u8 => return self.removeWithIndex(key, key_hash, header, u8, removal_type), + .u16 => return self.removeWithIndex(key, key_hash, header, u16, removal_type), + .u32 => return self.removeWithIndex(key, key_hash, header, u32, removal_type), + .usize => return self.removeWithIndex(key, key_hash, header, usize, removal_type), + } + } + + fn removeWithIndex(self: *Self, key: K, key_hash: Hash, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) ?Entry { const indexes = header.indexes(I); - const h = hash(key); + const h = if (store_hash) key_hash else hash(key); const start_index = header.constrainIndex(h); var roll_over: usize = 0; while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { @@ -583,11 +717,26 @@ pub fn ArrayHashMapUnmanaged( if (!hash_match or !eql(key, entry.key)) continue; - const removed_entry = self.entries.swapRemove(index.entry_index); - if (self.entries.items.len > 0 and self.entries.items.len != index.entry_index) { - // Because of the swap remove, now we need to update the index that was - // pointing to the last entry and is now pointing to this removed item slot. - self.updateEntryIndex(header, self.entries.items.len, index.entry_index, I, indexes); + var removed_entry: ?Entry = undefined; + switch (removal_type) { + .swap => { + removed_entry = self.entries.swapRemove(index.entry_index); + if (self.entries.items.len > 0 and self.entries.items.len != index.entry_index) { + // Because of the swap remove, now we need to update the index that was + // pointing to the last entry and is now pointing to this removed item slot. + self.updateEntryIndex(header, self.entries.items.len, index.entry_index, I, indexes); + } + }, + .ordered => { + removed_entry = self.entries.orderedRemove(index.entry_index); + var i: usize = index.entry_index; + while (i < self.entries.items.len) : (i += 1) { + // Because of the ordered remove, everything from the entry index onwards has + // been shifted forward so we'll need to update the index entries. + self.updateEntryIndex(header, i + 1, i, I, indexes); + } + }, + .index_only => removed_entry = null, } // Now we have to shift over the following indexes. 
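To make the two removal strategies implemented above concrete, here is a
small usage sketch against the new public API (illustrative only, not part
of the patch):

    const std = @import("std");

    test "swapRemove reorders, orderedRemove preserves order" {
        var map = std.AutoArrayHashMap(i32, i32).init(std.testing.allocator);
        defer map.deinit();

        try map.put(1, 10);
        try map.put(2, 20);
        try map.put(3, 30);

        // Swap removal moves the last entry (key 3) into the vacated slot.
        _ = map.swapRemove(1);
        std.testing.expect(map.items()[0].key == 3);
        std.testing.expect(map.items()[1].key == 2);

        // Ordered removal shifts the remaining entries forward instead,
        // preserving insertion order.
        _ = map.orderedRemove(3);
        std.testing.expect(map.items()[0].key == 2);
    }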
@@ -658,6 +807,7 @@ pub fn ArrayHashMapUnmanaged( return .{ .found_existing = false, .entry = new_entry, + .index = self.entries.items.len - 1, }; } @@ -669,6 +819,7 @@ pub fn ArrayHashMapUnmanaged( return .{ .found_existing = true, .entry = entry, + .index = index.entry_index, }; } if (index.distance_from_start_index < distance_from_start_index) { @@ -710,6 +861,7 @@ pub fn ArrayHashMapUnmanaged( return .{ .found_existing = false, .entry = new_entry, + .index = self.entries.items.len - 1, }; } if (next_index.distance_from_start_index < distance_from_start_index) { @@ -901,11 +1053,13 @@ test "basic hash map usage" { const gop1 = try map.getOrPut(5); testing.expect(gop1.found_existing == true); testing.expect(gop1.entry.value == 55); + testing.expect(gop1.index == 4); gop1.entry.value = 77; testing.expect(map.getEntry(5).?.value == 77); const gop2 = try map.getOrPut(99); testing.expect(gop2.found_existing == false); + testing.expect(gop2.index == 5); gop2.entry.value = 42; testing.expect(map.getEntry(99).?.value == 42); @@ -919,13 +1073,32 @@ test "basic hash map usage" { testing.expect(map.getEntry(2).?.value == 22); testing.expect(map.get(2).? == 22); - const rmv1 = map.remove(2); + const rmv1 = map.swapRemove(2); testing.expect(rmv1.?.key == 2); testing.expect(rmv1.?.value == 22); - testing.expect(map.remove(2) == null); + testing.expect(map.swapRemove(2) == null); testing.expect(map.getEntry(2) == null); testing.expect(map.get(2) == null); + // Since we've used `swapRemove` above, the index of this entry should remain unchanged. + testing.expect(map.getIndex(100).? == 1); + const gop5 = try map.getOrPut(5); + testing.expect(gop5.found_existing == true); + testing.expect(gop5.entry.value == 77); + testing.expect(gop5.index == 4); + + // Whereas, if we do an `orderedRemove`, it should move the index forward one spot. + const rmv2 = map.orderedRemove(100); + testing.expect(rmv2.?.key == 100); + testing.expect(rmv2.?.value == 41); + testing.expect(map.orderedRemove(100) == null); + testing.expect(map.getEntry(100) == null); + testing.expect(map.get(100) == null); + const gop6 = try map.getOrPut(5); + testing.expect(gop6.found_existing == true); + testing.expect(gop6.entry.value == 77); + testing.expect(gop6.index == 3); + map.removeAssertDiscard(3); } @@ -1019,6 +1192,132 @@ test "clone" { } } +test "shrink" { + var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); + defer map.deinit(); + + // This test is more interesting if we insert enough entries to allocate the index header. + const num_entries = 20; + var i: i32 = 0; + while (i < num_entries) : (i += 1) + testing.expect((try map.fetchPut(i, i * 10)) == null); + + testing.expect(map.unmanaged.index_header != null); + testing.expect(map.count() == num_entries); + + // Test `shrinkRetainingCapacity`. + map.shrinkRetainingCapacity(17); + testing.expect(map.count() == 17); + testing.expect(map.capacity() == 20); + i = 0; + while (i < num_entries) : (i += 1) { + const gop = try map.getOrPut(i); + if (i < 17) { + testing.expect(gop.found_existing == true); + testing.expect(gop.entry.value == i * 10); + } else + testing.expect(gop.found_existing == false); + } + + // Test `shrinkAndFree`. 
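+    // Unlike `shrinkRetainingCapacity` above, this also returns memory to the allocator.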
+ map.shrinkAndFree(15); + testing.expect(map.count() == 15); + testing.expect(map.capacity() == 15); + i = 0; + while (i < num_entries) : (i += 1) { + const gop = try map.getOrPut(i); + if (i < 15) { + testing.expect(gop.found_existing == true); + testing.expect(gop.entry.value == i * 10); + } else + testing.expect(gop.found_existing == false); + } +} + +test "pop" { + var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); + defer map.deinit(); + + testing.expect((try map.fetchPut(1, 11)) == null); + testing.expect((try map.fetchPut(2, 22)) == null); + testing.expect((try map.fetchPut(3, 33)) == null); + testing.expect((try map.fetchPut(4, 44)) == null); + + const pop1 = map.pop(); + testing.expect(pop1.key == 4 and pop1.value == 44); + const pop2 = map.pop(); + testing.expect(pop2.key == 3 and pop2.value == 33); + const pop3 = map.pop(); + testing.expect(pop3.key == 2 and pop3.value == 22); + const pop4 = map.pop(); + testing.expect(pop4.key == 1 and pop4.value == 11); +} + +test "reIndex" { + var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); + defer map.deinit(); + + // Populate via the API. + const num_indexed_entries = 20; + var i: i32 = 0; + while (i < num_indexed_entries) : (i += 1) + testing.expect((try map.fetchPut(i, i * 10)) == null); + + // Make sure we allocated an index header. + testing.expect(map.unmanaged.index_header != null); + + // Now write to the underlying array list directly. + const num_unindexed_entries = 20; + const hash = getAutoHashFn(i32); + var al = &map.unmanaged.entries; + while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) { + try al.append(std.testing.allocator, .{ + .key = i, + .value = i * 10, + .hash = hash(i), + }); + } + + // After reindexing, we should see everything. + try map.reIndex(); + i = 0; + while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) { + const gop = try map.getOrPut(i); + testing.expect(gop.found_existing == true); + testing.expect(gop.entry.value == i * 10); + testing.expect(gop.index == i); + } +} + +test "fromOwnedArrayList" { + comptime const array_hash_map_type = AutoArrayHashMap(i32, i32); + var al = std.ArrayListUnmanaged(array_hash_map_type.Entry){}; + const hash = getAutoHashFn(i32); + + // Populate array list. + const num_entries = 20; + var i: i32 = 0; + while (i < num_entries) : (i += 1) { + try al.append(std.testing.allocator, .{ + .key = i, + .value = i * 10, + .hash = hash(i), + }); + } + + // Now instantiate using `fromOwnedArrayList`. + var map = try array_hash_map_type.fromOwnedArrayList(std.testing.allocator, al); + defer map.deinit(); + + i = 0; + while (i < num_entries) : (i += 1) { + const gop = try map.getOrPut(i); + testing.expect(gop.found_existing == true); + testing.expect(gop.entry.value == i * 10); + testing.expect(gop.index == i); + } +} + pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) { return struct { fn hash(key: K) u32 { diff --git a/src/Module.zig b/src/Module.zig index 6a4575394a..59cd4968e5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -561,7 +561,7 @@ pub const Scope = struct { } pub fn removeDecl(self: *Container, child: *Decl) void { - _ = self.decls.remove(child); + _ = self.decls.swapRemove(child); } pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash { @@ -1660,7 +1660,7 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void // Update the AST Node index of the decl, even if its contents are unchanged, it may // have been re-ordered. 
decl.src_index = decl_i; - if (deleted_decls.remove(decl) == null) { + if (deleted_decls.swapRemove(decl) == null) { decl.analysis = .sema_failure; const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{s}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); @@ -1702,7 +1702,7 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void // Update the AST Node index of the decl, even if its contents are unchanged, it may // have been re-ordered. decl.src_index = decl_i; - if (deleted_decls.remove(decl) == null) { + if (deleted_decls.swapRemove(decl) == null) { decl.analysis = .sema_failure; const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{s}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); @@ -1832,7 +1832,7 @@ pub fn deleteDecl(self: *Module, decl: *Decl) !void { try self.markOutdatedDecl(dep); } } - if (self.failed_decls.remove(decl)) |entry| { + if (self.failed_decls.swapRemove(decl)) |entry| { entry.value.destroy(self.gpa); } self.deleteDeclExports(decl); @@ -1843,7 +1843,7 @@ pub fn deleteDecl(self: *Module, decl: *Decl) !void { /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). fn deleteDeclExports(self: *Module, decl: *Decl) void { - const kv = self.export_owners.remove(decl) orelse return; + const kv = self.export_owners.swapRemove(decl) orelse return; for (kv.value) |exp| { if (self.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| { @@ -1870,10 +1870,10 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void { if (self.comp.bin_file.cast(link.File.MachO)) |macho| { macho.deleteExport(exp.link.macho); } - if (self.failed_exports.remove(exp)) |entry| { + if (self.failed_exports.swapRemove(exp)) |entry| { entry.value.destroy(self.gpa); } - _ = self.symbol_exports.remove(exp.options.name); + _ = self.symbol_exports.swapRemove(exp.options.name); self.gpa.free(exp.options.name); self.gpa.destroy(exp); } @@ -1918,7 +1918,7 @@ pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void { fn markOutdatedDecl(self: *Module, decl: *Decl) !void { log.debug("mark {s} outdated\n", .{decl.name}); try self.comp.work_queue.writeItem(.{ .analyze_decl = decl }); - if (self.failed_decls.remove(decl)) |entry| { + if (self.failed_decls.swapRemove(decl)) |entry| { entry.value.destroy(self.gpa); } decl.analysis = .outdated; diff --git a/src/codegen.zig b/src/codegen.zig index c2537a1ca0..7f449082f4 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2123,7 +2123,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.items().len + else_branch.inst_table.items().len); for (else_branch.inst_table.items()) |else_entry| { - const canon_mcv = if (saved_then_branch.inst_table.remove(else_entry.key)) |then_entry| blk: { + const canon_mcv = if (saved_then_branch.inst_table.swapRemove(else_entry.key)) |then_entry| blk: { // The instruction's MCValue is overridden in both branches. 
parent_branch.inst_table.putAssumeCapacity(else_entry.key, then_entry.value); if (else_entry.value == .dead) { -- cgit v1.2.3 From 480d6182ad543ea9e2d7cf0897f6fad7cbefc572 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 3 Jan 2021 16:56:47 +0100 Subject: stage2 ARM: fix offsets in exitlude jump relocations --- src/codegen.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index c2537a1ca0..981f88abc3 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -612,8 +612,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // jump self.code.items.len -= 4; } else for (self.exitlude_jump_relocs.items) |jmp_reloc| { - const amt = self.code.items.len - (jmp_reloc + 4); - if (amt == 0) { + const amt = @intCast(i32, self.code.items.len) - @intCast(i32, jmp_reloc + 8); + if (amt == -4) { // This return is at the end of the // code block. We can't just delete // the space because there may be -- cgit v1.2.3 From be6ac82ee1dc6619576da6dc7ba04213faea998d Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 3 Jan 2021 21:57:41 +0100 Subject: stage2 ARM: fix stack offsets for genSetReg and genSetStack --- src/codegen.zig | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 981f88abc3..d704744486 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2575,12 +2575,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); }, .register => |reg| { - // TODO: strh - const offset = if (stack_offset <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, stack_offset)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = stack_offset }), 0); - const abi_size = ty.abiSize(self.target.*); + const adj_off = stack_offset + abi_size; + const offset = if (adj_off <= math.maxInt(u12)) blk: { + break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); + } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + switch (abi_size) { 1 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.strb(.al, reg, .fp, .{ .offset = offset, @@ -2778,21 +2778,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32()); }, .stack_offset => |unadjusted_off| { - // TODO: ldrh // TODO: maybe addressing from sp instead of fp - const offset = if (unadjusted_off <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, unadjusted_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = unadjusted_off }), 0); - // TODO: supply type information to genSetReg as we do to genSetStack // const abi_size = ty.abiSize(self.target.*); const abi_size = 4; + const adj_off = unadjusted_off + abi_size; + const offset = if (adj_off <= math.maxInt(u12)) blk: { + break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); + } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + switch (abi_size) { 1 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrb(.al, reg, .fp, .{ .offset = offset, .positive = false, }).toU32()), - 2 => return self.fail(src, "TODO implement strh", .{}), + 2 => return self.fail(src, 
"TODO implement ldrh", .{}), 4 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, .fp, .{ .offset = offset, .positive = false, -- cgit v1.2.3 From a7da90071e23f22158ef508a263cb3a4054554dd Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 8 Jan 2021 22:11:49 +0100 Subject: stage2: fix bug in genArg When an argument is unused in the function body, still increment arg_index so we still select the correct arguments in the args slice. --- src/codegen.zig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index dd37d3b025..eba5cb7913 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1480,6 +1480,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genArg(self: *Self, inst: *ir.Inst.Arg) !MCValue { + const arg_index = self.arg_index; + self.arg_index += 1; + if (FreeRegInt == u0) { return self.fail(inst.base.src, "TODO implement Register enum for {}", .{self.target.cpu.arch}); } @@ -1488,8 +1491,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1); - const result = self.args[self.arg_index]; - self.arg_index += 1; + const result = self.args[arg_index]; const name_with_null = inst.name[0 .. mem.lenZ(inst.name) + 1]; switch (result) { -- cgit v1.2.3 From 5487dd13ea23ad7e547995b9a088ba37bfe17737 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 8 Jan 2021 23:16:50 +0100 Subject: stage2: lay the groundwork in prep for extern fn This commit lays the groundwork in preparation for implementing handling of extern functions in various backends. --- src/Module.zig | 45 +++++++++++++++++++++++++++++++++++++++++++-- src/codegen.zig | 12 ++++++++++++ src/link/Coff.zig | 6 +++++- src/link/Elf.zig | 6 +++++- src/link/MachO.zig | 6 +++++- src/link/Wasm.zig | 5 ++++- 6 files changed, 74 insertions(+), 6 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Module.zig b/src/Module.zig index 8575c59f4c..0bdeab68d0 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -22,6 +22,7 @@ const ast = std.zig.ast; const trace = @import("tracy.zig").trace; const astgen = @import("astgen.zig"); const zir_sema = @import("zir_sema.zig"); +const target_util = @import("target.zig"); const default_eval_branch_quota = 1000; @@ -1109,8 +1110,48 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { if (fn_proto.getVarArgsToken()) |var_args_token| { return self.failTok(&fn_type_scope.base, var_args_token, "TODO implement var args", .{}); } - if (fn_proto.getLibName()) |lib_name| { - return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{}); + if (fn_proto.getLibName()) |lib_name| blk: { + const lib_name_str = mem.trim(u8, tree.tokenSlice(lib_name.firstToken()), "\""); // TODO: call identifierTokenString + log.debug("extern fn symbol expected in lib '{s}'", .{lib_name_str}); + const target = self.comp.getTarget(); + if (target_util.is_libc_lib_name(target, lib_name_str)) { + if (!self.comp.bin_file.options.link_libc) { + return self.failNode( + &fn_type_scope.base, + lib_name, + "dependency on libc must be explicitly specified in the build command", + .{}, + ); + } + break :blk; + } + if (target_util.is_libcpp_lib_name(target, lib_name_str)) { + if (!self.comp.bin_file.options.link_libcpp) { + return self.failNode( + &fn_type_scope.base, + lib_name, + "dependency on libc++ must be explicitly specified in the build command", + .{}, + ); + } + break :blk; + } + if 
(!target.isWasm() and !self.comp.bin_file.options.pic) {
+                return self.failNode(
+                    &fn_type_scope.base,
+                    lib_name,
+                    "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
+                    .{ lib_name_str, lib_name_str },
+                );
+            }
+            self.comp.stage1AddLinkLib(lib_name_str) catch |err| {
+                return self.failNode(
+                    &fn_type_scope.base,
+                    lib_name,
+                    "unable to add link lib '{s}': {s}",
+                    .{ lib_name_str, @errorName(err) },
+                );
+            };
         }
         if (fn_proto.getAlignExpr()) |align_expr| {
             return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{});
diff --git a/src/codegen.zig b/src/codegen.zig
index eba5cb7913..d83eba2219 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1602,6 +1602,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 try self.code.ensureCapacity(self.code.items.len + 7);
                 self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                 mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
             } else {
                 return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
             }
@@ -1628,6 +1630,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
 
                 mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
             } else {
                 return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
             }
@@ -1672,6 +1676,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                         else => return self.fail(inst.base.src, "TODO implement fn call with non-void return value", .{}),
                     }
                 }
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
             } else {
                 return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
             }
@@ -1733,6 +1739,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .lr, Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none)).toU32());
                     writeInt(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
                 }
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
             } else {
                 return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
             }
@@ -1787,6 +1795,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 try self.genSetReg(inst.base.src, .x30, .{ .memory = got_addr });
 
                 writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
             } else {
                 return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
             }
@@ -1849,6 +1859,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     },
                     else => unreachable, // unsupported architecture on MachO
                 }
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
             } else {
                 return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
             }
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index
f7d646356c..f7cd9b69ce 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -662,10 +662,14 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { if (build_options.have_llvm) if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.updateDecl(module, decl); + const typed_value = decl.typed_value.most_recent.typed_value; + if (typed_value.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const typed_value = decl.typed_value.most_recent.typed_value; const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none); const code = switch (res) { .externally_managed => |x| x, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index f3073824a5..8c76a4e967 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2157,6 +2157,11 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { if (build_options.have_llvm) if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.updateDecl(module, decl); + const typed_value = decl.typed_value.most_recent.typed_value; + if (typed_value.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -2175,7 +2180,6 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { dbg_info_type_relocs.deinit(self.base.allocator); } - const typed_value = decl.typed_value.most_recent.typed_value; const is_fn: bool = switch (typed_value.ty.zigTypeTag()) { .Fn => true, else => false, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index efb16e4d1c..dbc982f5e2 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1118,6 +1118,11 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { const tracy = trace(@src()); defer tracy.end(); + const typed_value = decl.typed_value.most_recent.typed_value; + if (typed_value.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -1134,7 +1139,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } } - const typed_value = decl.typed_value.most_recent.typed_value; const res = if (debug_buffers) |*dbg| try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{ .dwarf = .{ diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index cbb3e83147..576be2d306 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -100,8 +100,11 @@ pub fn deinit(self: *Wasm) void { // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { - if (decl.typed_value.most_recent.typed_value.ty.zigTypeTag() != .Fn) + const typed_value = decl.typed_value.most_recent.typed_value; + if (typed_value.ty.zigTypeTag() != .Fn) return error.TODOImplementNonFnDeclsForWasm; + if (typed_value.val.tag() == .extern_fn) + return error.TODOImplementExternFnDeclsForWasm; if (decl.fn_link.wasm) |*fn_data| { fn_data.functype.items.len = 0; -- cgit v1.2.3 From f44732c1b0d7516c4a8169f7381cbcf55e1ae460 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 6 Jan 2021 01:13:25 +0100 Subject: macho: populate stubs and stub_helper --- src/codegen.zig | 22 +++++++++++++-- src/link/MachO.zig | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 96 insertions(+), 6 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index d83eba2219..7100227db0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1859,8 +1859,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => unreachable, // unsupported architecture on MachO } - } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); + } else if (func_value.castTag(.extern_fn)) |func_payload| { + const decl = func_payload.data; + const decl_name = try std.fmt.allocPrint(self.bin_file.allocator, "_{s}", .{decl.name}); + defer self.bin_file.allocator.free(decl_name); + const name = try macho_file.makeString(decl_name); + const symbol = macho_file.undef_symbols.items.len; + try macho_file.undef_symbols.append(self.bin_file.allocator, .{ + .n_strx = name, + .n_type = std.macho.N_UNDF | std.macho.N_EXT, + .n_sect = 0, + .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, + .n_value = 0, + }); + try macho_file.stub_fixups.append(self.bin_file.allocator, .{ + .symbol = symbol, + .start = self.code.items.len, + .len = 4, + }); + // We mark the space and fix it up later. + writeInt(u32, try self.code.addManyAsArray(4), 0); } else { return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 61be154a13..079478ecfd 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -115,6 +115,7 @@ global_symbol_free_list: std.ArrayListUnmanaged(u32) = .{}, offset_table_free_list: std.ArrayListUnmanaged(u32) = .{}, dyld_stub_binder_index: ?u16 = null, +next_stub_helper_off: ?u64 = null, /// Table of symbol names aka the string table. string_table: std.ArrayListUnmanaged(u8) = .{}, @@ -162,6 +163,14 @@ last_text_block: ?*TextBlock = null, /// rather than sitting in the global scope. pie_fixups: std.ArrayListUnmanaged(PieFixup) = .{}, +stub_fixups: std.ArrayListUnmanaged(StubFixup) = .{}, + +pub const StubFixup = struct { + symbol: usize, + start: usize, + len: usize, +}; + pub const PieFixup = struct { /// Target address we wanted to address in absolute terms. 
address: u64, @@ -1223,7 +1232,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } // Perform PIE fixups (if any) - const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; const got_section = text_segment.sections.items[self.got_section_index.?]; while (self.pie_fixups.popOrNull()) |fixup| { const target_addr = fixup.address; @@ -1243,6 +1252,45 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } } + // Resolve stubs (if any) + const stubs = &text_segment.sections.items[self.stubs_section_index.?]; + const stub_h = &text_segment.sections.items[self.stub_helper_section_index.?]; + const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; + const la_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?]; + while (self.stub_fixups.popOrNull()) |fixup| { + // TODO increment offset for stub writing + const stub_addr = stubs.addr; + const text_addr = symbol.n_value + fixup.start; + const displacement = @intCast(u32, stub_addr - text_addr); + var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; + mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(@intCast(i28, displacement)).toU32()); + + const end = stub_h.addr + self.next_stub_helper_off.? - stub_h.offset; + var buf: [@sizeOf(u64)]u8 = undefined; + mem.writeIntLittle(u64, &buf, end); + try self.base.file.?.pwriteAll(&buf, la_ptr.offset); + + const displacement2 = la_ptr.addr - stubs.addr; + var ccode: [2 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, ccode[0..4], aarch64.Instruction.ldr(.x16, .{ + .literal = @intCast(u19, displacement2 / 4), + }).toU32()); + mem.writeIntLittle(u32, ccode[4..8], aarch64.Instruction.br(.x16).toU32()); + try self.base.file.?.pwriteAll(&ccode, stubs.offset); + stubs.size = 2 * @sizeOf(u32); + stubs.reserved2 = 2 * @sizeOf(u32); + + const displacement3 = @intCast(i64, stub_h.addr) - @intCast(i64, end + 4); + var cccode: [3 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, cccode[0..4], aarch64.Instruction.ldr(.w16, .{ + .literal = 0x2, + }).toU32()); + mem.writeIntLittle(u32, cccode[4..8], aarch64.Instruction.b(@intCast(i28, displacement3)).toU32()); + mem.writeIntLittle(u32, cccode[8..12], 0); + try self.base.file.?.pwriteAll(&cccode, self.next_stub_helper_off.?); + self.next_stub_helper_off = self.next_stub_helper_off.? + 3 * @sizeOf(u32); + } + const text_section = text_segment.sections.items[self.text_section_index.?]; const section_offset = symbol.n_value - text_section.addr; const file_offset = text_section.offset + section_offset; @@ -1555,7 +1603,7 @@ pub fn populateMissingMetadata(self: *MachO) !void { .sectname = makeStaticString("__stubs"), .segname = makeStaticString("__TEXT"), .addr = text_segment.inner.vmaddr + off, - .size = 0, // This will be populated later in tandem with .reserved2 field. + .size = needed_size, .offset = @intCast(u32, off), .@"align" = alignment, .reloff = 0, @@ -1582,7 +1630,7 @@ pub fn populateMissingMetadata(self: *MachO) !void { const off = text_segment.findFreeSpace(needed_size, @alignOf(u64), self.header_pad); assert(off + needed_size <= text_segment.inner.fileoff + text_segment.inner.filesize); // TODO Must expand __TEXT segment. 
- log.debug("found __stubs section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); + log.debug("found __stub_helper section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try text_segment.addSection(self.base.allocator, .{ .sectname = makeStaticString("__stub_helper"), @@ -1987,6 +2035,30 @@ pub fn populateMissingMetadata(self: *MachO) !void { .n_value = 0, }); } + if (self.next_stub_helper_off == null) { + const text = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const sh = &text.sections.items[self.stub_helper_section_index.?]; + const data = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; + const data_data = &data.sections.items[self.data_section_index.?]; + const displacement = data_data.addr - sh.addr; + var code: [4 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adr(.x17, @intCast(i21, displacement)).toU32()); + mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.stp( + .x16, + .x17, + aarch64.Register.sp, + aarch64.Instruction.LoadStorePairOffset.pre_index(-16), + ).toU32()); + const dc = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; + const got = &dc.sections.items[self.data_got_section_index.?]; + const displacement2 = got.addr - sh.addr - 2 * @sizeOf(u32); + mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.ldr(.x16, .{ + .literal = @intCast(u19, displacement2 / 4), + }).toU32()); + mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.br(.x16).toU32()); + self.next_stub_helper_off = sh.offset + 4 * @sizeOf(u32); + try self.base.file.?.pwriteAll(&code, sh.offset); + } } fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 { @@ -2101,7 +2173,7 @@ pub fn makeStaticString(comptime bytes: []const u8) [16]u8 { return buf; } -fn makeString(self: *MachO, bytes: []const u8) !u32 { +pub fn makeString(self: *MachO, bytes: []const u8) !u32 { try self.string_table.ensureCapacity(self.base.allocator, self.string_table.items.len + bytes.len + 1); const result = @intCast(u32, self.string_table.items.len); self.string_table.appendSliceAssumeCapacity(bytes); -- cgit v1.2.3 From 44a052a65f5c59b02383178f36fcc231df28b49f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 8 Jan 2021 22:26:18 +0100 Subject: macho: write out stubs for new externs only --- src/codegen.zig | 30 ++++++++++++------ src/link/MachO.zig | 93 ++++++++++++++++++++++++++++-------------------------- 2 files changed, 68 insertions(+), 55 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 7100227db0..84148fcdd4 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1862,18 +1862,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (func_value.castTag(.extern_fn)) |func_payload| { const decl = func_payload.data; const decl_name = try std.fmt.allocPrint(self.bin_file.allocator, "_{s}", .{decl.name}); - defer self.bin_file.allocator.free(decl_name); - const name = try macho_file.makeString(decl_name); - const symbol = macho_file.undef_symbols.items.len; - try macho_file.undef_symbols.append(self.bin_file.allocator, .{ - .n_strx = name, - .n_type = std.macho.N_UNDF | std.macho.N_EXT, - .n_sect = 0, - .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, - .n_value = 0, - }); + const exists: bool = macho_file.externs.contains(decl_name); + const symbol: u32 = blk: { + if (macho_file.externs.get(decl_name)) |index| { + 
self.bin_file.allocator.free(decl_name); + break :blk index; + } else { + const extern_index = @intCast(u32, macho_file.undef_symbols.items.len - 1); // TODO + try macho_file.externs.putNoClobber(self.bin_file.allocator, decl_name, extern_index); + const name = try macho_file.makeString(decl_name); + try macho_file.undef_symbols.append(self.bin_file.allocator, .{ + .n_strx = name, + .n_type = std.macho.N_UNDF | std.macho.N_EXT, + .n_sect = 0, + .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, + .n_value = 0, + }); + break :blk extern_index; + } + }; try macho_file.stub_fixups.append(self.bin_file.allocator, .{ .symbol = symbol, + .exists = exists, .start = self.code.items.len, .len = 4, }); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e83fab6294..3c876bf59d 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -167,9 +167,11 @@ last_text_block: ?*TextBlock = null, pie_fixups: std.ArrayListUnmanaged(PieFixup) = .{}, stub_fixups: std.ArrayListUnmanaged(StubFixup) = .{}, +externs: std.StringHashMapUnmanaged(u32) = .{}, pub const StubFixup = struct { - symbol: usize, + symbol: u32, + exists: bool, start: usize, len: usize, }; @@ -1263,56 +1265,57 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { const stub_h = &text_segment.sections.items[self.stub_helper_section_index.?]; const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; const la_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?]; - for (self.stub_fixups.items) |fixup, idx| { - const i = @intCast(u32, idx); + for (self.stub_fixups.items) |fixup| { // TODO increment offset for stub writing - const stub_addr = stubs.addr + i * stubs.reserved2; + const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; const text_addr = symbol.n_value + fixup.start; const displacement = @intCast(u32, stub_addr - text_addr); var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(@intCast(i28, displacement)).toU32()); - const end = stub_h.addr + self.next_stub_helper_off.? - stub_h.offset; - var buf: [@sizeOf(u64)]u8 = undefined; - mem.writeIntLittle(u64, &buf, end); - try self.base.file.?.pwriteAll(&buf, la_ptr.offset + i * @sizeOf(u64)); - - const la_ptr_addr = la_ptr.addr + i * @sizeOf(u64); - const displacement2 = la_ptr_addr - stub_addr; - var ccode: [2 * @sizeOf(u32)]u8 = undefined; - mem.writeIntLittle(u32, ccode[0..4], aarch64.Instruction.ldr(.x16, .{ - .literal = @intCast(u19, displacement2 / 4), - }).toU32()); - mem.writeIntLittle(u32, ccode[4..8], aarch64.Instruction.br(.x16).toU32()); - try self.base.file.?.pwriteAll(&ccode, stubs.offset + i * stubs.reserved2); - - const displacement3 = @intCast(i64, stub_h.addr) - @intCast(i64, end + 4); - var cccode: [3 * @sizeOf(u32)]u8 = undefined; - mem.writeIntLittle(u32, cccode[0..4], aarch64.Instruction.ldr(.w16, .{ - .literal = 0x2, - }).toU32()); - mem.writeIntLittle(u32, cccode[4..8], aarch64.Instruction.b(@intCast(i28, displacement3)).toU32()); - mem.writeIntLittle(u32, cccode[8..12], i * 0xd); - try self.base.file.?.pwriteAll(&cccode, self.next_stub_helper_off.?); - self.next_stub_helper_off = self.next_stub_helper_off.? 
+ 3 * @sizeOf(u32); - - try self.rebase_info_table.symbols.append(self.base.allocator, .{ - .segment = 3, - .offset = i * stubs.reserved2, - }); - self.rebase_info_dirty = true; - - const sym = self.undef_symbols.items[fixup.symbol]; - const name_str = self.getString(sym.n_strx); - var name = try self.base.allocator.alloc(u8, name_str.len); - mem.copy(u8, name, name_str); - try self.lazy_binding_info_table.symbols.append(self.base.allocator, .{ - .segment = 3, - .offset = i * @sizeOf(u64), - .dylib_ordinal = 1, - .name = name, - }); - self.lazy_binding_info_dirty = true; + if (!fixup.exists) { + const end = stub_h.addr + self.next_stub_helper_off.? - stub_h.offset; + var buf: [@sizeOf(u64)]u8 = undefined; + mem.writeIntLittle(u64, &buf, end); + try self.base.file.?.pwriteAll(&buf, la_ptr.offset + fixup.symbol * @sizeOf(u64)); + + const la_ptr_addr = la_ptr.addr + fixup.symbol * @sizeOf(u64); + const displacement2 = la_ptr_addr - stub_addr; + var ccode: [2 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, ccode[0..4], aarch64.Instruction.ldr(.x16, .{ + .literal = @intCast(u19, displacement2 / 4), + }).toU32()); + mem.writeIntLittle(u32, ccode[4..8], aarch64.Instruction.br(.x16).toU32()); + try self.base.file.?.pwriteAll(&ccode, stubs.offset + fixup.symbol * stubs.reserved2); + + const displacement3 = @intCast(i64, stub_h.addr) - @intCast(i64, end + 4); + var cccode: [3 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, cccode[0..4], aarch64.Instruction.ldr(.w16, .{ + .literal = 0x2, + }).toU32()); + mem.writeIntLittle(u32, cccode[4..8], aarch64.Instruction.b(@intCast(i28, displacement3)).toU32()); + mem.writeIntLittle(u32, cccode[8..12], fixup.symbol * 0xd); + try self.base.file.?.pwriteAll(&cccode, self.next_stub_helper_off.?); + self.next_stub_helper_off = self.next_stub_helper_off.? + 3 * @sizeOf(u32); + + try self.rebase_info_table.symbols.append(self.base.allocator, .{ + .segment = 3, + .offset = fixup.symbol * stubs.reserved2, + }); + self.rebase_info_dirty = true; + + const sym = self.undef_symbols.items[fixup.symbol + 1]; + const name_str = self.getString(sym.n_strx); + var name = try self.base.allocator.alloc(u8, name_str.len); + mem.copy(u8, name, name_str); + try self.lazy_binding_info_table.symbols.append(self.base.allocator, .{ + .segment = 3, + .offset = fixup.symbol * @sizeOf(u64), + .dylib_ordinal = 1, + .name = name, + }); + self.lazy_binding_info_dirty = true; + } } self.stub_fixups.shrinkRetainingCapacity(0); -- cgit v1.2.3 From b86d0e488b9dfc7d943da86a34998360e24da225 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 9 Jan 2021 19:29:33 +0100 Subject: macho: refactor writing and managing externs --- src/codegen.zig | 26 ++- src/link/MachO.zig | 383 ++++++++++++++++++++++----------------- src/link/MachO/imports.zig | 440 +++++++++++++-------------------------------- 3 files changed, 359 insertions(+), 490 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 84148fcdd4..ad4215191f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1861,29 +1861,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } else if (func_value.castTag(.extern_fn)) |func_payload| { const decl = func_payload.data; + // We don't free the decl_name immediately unless it already exists. + // If it doesn't, it will get autofreed when we clean up the extern symbol table. 
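+            // (For a new symbol, the table entry created below stores decl_name and takes ownership of it.)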
const decl_name = try std.fmt.allocPrint(self.bin_file.allocator, "_{s}", .{decl.name}); - const exists: bool = macho_file.externs.contains(decl_name); + const already_defined = macho_file.extern_lazy_symbols.contains(decl_name); const symbol: u32 = blk: { - if (macho_file.externs.get(decl_name)) |index| { + if (macho_file.extern_lazy_symbols.get(decl_name)) |sym| { self.bin_file.allocator.free(decl_name); - break :blk index; + break :blk sym.index; } else { - const extern_index = @intCast(u32, macho_file.undef_symbols.items.len - 1); // TODO - try macho_file.externs.putNoClobber(self.bin_file.allocator, decl_name, extern_index); - const name = try macho_file.makeString(decl_name); - try macho_file.undef_symbols.append(self.bin_file.allocator, .{ - .n_strx = name, - .n_type = std.macho.N_UNDF | std.macho.N_EXT, - .n_sect = 0, - .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, - .n_value = 0, + const index = @intCast(u32, macho_file.extern_lazy_symbols.items().len); + try macho_file.extern_lazy_symbols.putNoClobber(self.bin_file.allocator, decl_name, .{ + .name = decl_name, + .dylib_ordinal = 1, // TODO this is now hardcoded, since we only support libSystem. + .index = index, }); - break :blk extern_index; + break :blk index; } }; try macho_file.stub_fixups.append(self.bin_file.allocator, .{ .symbol = symbol, - .exists = exists, + .already_defined = already_defined, .start = self.code.items.len, .len = 4, }); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 04941c4b68..7d86c27aa9 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -105,16 +105,17 @@ entry_addr: ?u64 = null, /// Table of all local symbols /// Internally references string table for names (which are optional). local_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -/// Table of all defined global symbols +/// Table of all global symbols global_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -/// Table of all undefined symbols -undef_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{}, +/// Table of all extern nonlazy symbols, indexed by name. +extern_nonlazy_symbols: std.StringArrayHashMapUnmanaged(ExternSymbol) = .{}, +/// Table of all extern lazy symbols, indexed by name. +extern_lazy_symbols: std.StringArrayHashMapUnmanaged(ExternSymbol) = .{}, local_symbol_free_list: std.ArrayListUnmanaged(u32) = .{}, global_symbol_free_list: std.ArrayListUnmanaged(u32) = .{}, offset_table_free_list: std.ArrayListUnmanaged(u32) = .{}, -dyld_stub_binder_index: ?u16 = null, stub_helper_stubs_start_off: ?u64 = null, /// Table of symbol names aka the string table. @@ -123,13 +124,6 @@ string_table: std.ArrayListUnmanaged(u8) = .{}, /// Table of trampolines to the actual symbols in __text section. offset_table: std.ArrayListUnmanaged(u64) = .{}, -/// Table of rebase info entries. -rebase_info_table: RebaseInfoTable = .{}, -/// Table of binding info entries. -binding_info_table: BindingInfoTable = .{}, -/// Table of lazy binding info entries. 
-lazy_binding_info_table: LazyBindingInfoTable = .{}, - error_flags: File.ErrorFlags = File.ErrorFlags{}, offset_table_count_dirty: bool = false, @@ -167,11 +161,10 @@ last_text_block: ?*TextBlock = null, pie_fixups: std.ArrayListUnmanaged(PieFixup) = .{}, stub_fixups: std.ArrayListUnmanaged(StubFixup) = .{}, -externs: std.StringHashMapUnmanaged(u32) = .{}, pub const StubFixup = struct { symbol: u32, - exists: bool, + already_defined: bool, start: usize, len: usize, }; @@ -920,42 +913,42 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void { return error.NoSymbolTableFound; } - // Parse dyld info - try self.parseBindingInfoTable(); - try self.parseLazyBindingInfoTable(); + // // Parse dyld info + // try self.parseBindingInfoTable(); + // try self.parseLazyBindingInfoTable(); - // Update the dylib ordinals. - self.binding_info_table.dylib_ordinal = next_ordinal; - for (self.lazy_binding_info_table.symbols.items) |*symbol| { - symbol.dylib_ordinal = next_ordinal; - } + // // Update the dylib ordinals. + // self.binding_info_table.dylib_ordinal = next_ordinal; + // for (self.lazy_binding_info_table.symbols.items) |*symbol| { + // symbol.dylib_ordinal = next_ordinal; + // } - // Write updated dyld info. - const dyld_info = self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; - { - const size = try self.binding_info_table.calcSize(); - assert(dyld_info.bind_size >= size); + // // Write updated dyld info. + // const dyld_info = self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; + // { + // const size = try self.binding_info_table.calcSize(); + // assert(dyld_info.bind_size >= size); - var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); - defer self.base.allocator.free(buffer); + // var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); + // defer self.base.allocator.free(buffer); - var stream = std.io.fixedBufferStream(buffer); - try self.binding_info_table.write(stream.writer()); + // var stream = std.io.fixedBufferStream(buffer); + // try self.binding_info_table.write(stream.writer()); - try self.base.file.?.pwriteAll(buffer, dyld_info.bind_off); - } - { - const size = try self.lazy_binding_info_table.calcSize(); - assert(dyld_info.lazy_bind_size >= size); + // try self.base.file.?.pwriteAll(buffer, dyld_info.bind_off); + // } + // { + // const size = try self.lazy_binding_info_table.calcSize(); + // assert(dyld_info.lazy_bind_size >= size); - var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); - defer self.base.allocator.free(buffer); + // var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); + // defer self.base.allocator.free(buffer); - var stream = std.io.fixedBufferStream(buffer); - try self.lazy_binding_info_table.write(stream.writer()); + // var stream = std.io.fixedBufferStream(buffer); + // try self.lazy_binding_info_table.write(stream.writer()); - try self.base.file.?.pwriteAll(buffer, dyld_info.lazy_bind_off); - } + // try self.base.file.?.pwriteAll(buffer, dyld_info.lazy_bind_off); + // } // Write updated load commands and the header try self.writeLoadCommands(); @@ -1037,14 +1030,13 @@ pub fn deinit(self: *MachO) void { if (self.d_sym) |*ds| { ds.deinit(self.base.allocator); } - self.binding_info_table.deinit(self.base.allocator); - self.lazy_binding_info_table.deinit(self.base.allocator); self.pie_fixups.deinit(self.base.allocator); self.text_block_free_list.deinit(self.base.allocator); self.offset_table.deinit(self.base.allocator); 
self.offset_table_free_list.deinit(self.base.allocator); self.string_table.deinit(self.base.allocator); - self.undef_symbols.deinit(self.base.allocator); + self.extern_lazy_symbols.deinit(self.base.allocator); + self.extern_nonlazy_symbols.deinit(self.base.allocator); self.global_symbols.deinit(self.base.allocator); self.global_symbol_free_list.deinit(self.base.allocator); self.local_symbols.deinit(self.base.allocator); @@ -1261,59 +1253,28 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } // Resolve stubs (if any) - const stubs = &text_segment.sections.items[self.stubs_section_index.?]; - const stub_h = &text_segment.sections.items[self.stub_helper_section_index.?]; - const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; - const la_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?]; + const stubs = text_segment.sections.items[self.stubs_section_index.?]; for (self.stub_fixups.items) |fixup| { - // TODO increment offset for stub writing const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; const text_addr = symbol.n_value + fixup.start; const displacement = @intCast(u32, stub_addr - text_addr); var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; - mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(@intCast(i28, displacement)).toU32()); - - if (!fixup.exists) { - const stub_off = self.stub_helper_stubs_start_off.? + fixup.symbol * 3 * @sizeOf(u32); - const end = stub_h.addr + stub_off - stub_h.offset; - var buf: [@sizeOf(u64)]u8 = undefined; - mem.writeIntLittle(u64, &buf, end); - try self.base.file.?.pwriteAll(&buf, la_ptr.offset + fixup.symbol * @sizeOf(u64)); - - const la_ptr_addr = la_ptr.addr + fixup.symbol * @sizeOf(u64); - const displacement2 = la_ptr_addr - stub_addr; - var ccode: [2 * @sizeOf(u32)]u8 = undefined; - mem.writeIntLittle(u32, ccode[0..4], aarch64.Instruction.ldr(.x16, .{ - .literal = @intCast(u19, displacement2 / 4), - }).toU32()); - mem.writeIntLittle(u32, ccode[4..8], aarch64.Instruction.br(.x16).toU32()); - try self.base.file.?.pwriteAll(&ccode, stubs.offset + fixup.symbol * stubs.reserved2); - - const displacement3 = @intCast(i64, stub_h.addr) - @intCast(i64, end + 4); - var cccode: [3 * @sizeOf(u32)]u8 = undefined; - mem.writeIntLittle(u32, cccode[0..4], aarch64.Instruction.ldr(.w16, .{ - .literal = 0x2, - }).toU32()); - mem.writeIntLittle(u32, cccode[4..8], aarch64.Instruction.b(@intCast(i28, displacement3)).toU32()); - mem.writeIntLittle(u32, cccode[8..12], fixup.symbol * 0xd); - try self.base.file.?.pwriteAll(&cccode, stub_off); - - try self.rebase_info_table.symbols.append(self.base.allocator, .{ - .segment = 3, - .offset = fixup.symbol * stubs.reserved2, - }); + switch (self.base.options.target.cpu.arch) { + .x86_64 => return error.TODOImplementStubFixupsForx86_64, + .aarch64 => { + mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(@intCast(i28, displacement)).toU32()); + }, + else => unreachable, + } + if (!fixup.already_defined) { + try self.writeStub(fixup.symbol); + try self.writeStubInStubHelper(fixup.symbol); + try self.writeLazySymbolPointer(fixup.symbol); + + const extern_sym = &self.extern_lazy_symbols.items()[fixup.symbol].value; + extern_sym.segment = self.data_segment_cmd_index.?; + extern_sym.offset = fixup.symbol * @sizeOf(u64); self.rebase_info_dirty = true; - - const sym = self.undef_symbols.items[fixup.symbol + 1]; - const name_str = self.getString(sym.n_strx); - var name = try self.base.allocator.alloc(u8, 
name_str.len); - mem.copy(u8, name, name_str); - try self.lazy_binding_info_table.symbols.append(self.base.allocator, .{ - .segment = 3, - .offset = fixup.symbol * @sizeOf(u64), - .dylib_ordinal = 1, - .name = name, - }); self.lazy_binding_info_dirty = true; } } @@ -2080,51 +2041,51 @@ pub fn populateMissingMetadata(self: *MachO) !void { self.header_dirty = true; self.load_commands_dirty = true; } - if (self.dyld_stub_binder_index == null) { - self.dyld_stub_binder_index = @intCast(u16, self.undef_symbols.items.len); - const name = try self.makeString("dyld_stub_binder"); - try self.undef_symbols.append(self.base.allocator, .{ - .n_strx = name, - .n_type = macho.N_UNDF | macho.N_EXT, - .n_sect = 0, - .n_desc = macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | macho.N_SYMBOL_RESOLVER, - .n_value = 0, - }); - - self.binding_info_table.dylib_ordinal = 1; - const nn = self.getString(name); - var n = try self.base.allocator.alloc(u8, nn.len); - mem.copy(u8, n, nn); - try self.binding_info_table.symbols.append(self.base.allocator, .{ - .name = n, - .segment = 2, - .offset = 0, + if (!self.extern_nonlazy_symbols.contains("dyld_stub_binder")) { + const index = @intCast(u32, self.extern_nonlazy_symbols.items().len); + const name = try std.fmt.allocPrint(self.base.allocator, "dyld_stub_binder", .{}); + try self.extern_nonlazy_symbols.putNoClobber(self.base.allocator, name, .{ + .name = name, + .dylib_ordinal = 1, // TODO this is currently hardcoded. + .index = index, + .segment = self.data_const_segment_cmd_index.?, + .offset = index * @sizeOf(u64), }); self.binding_info_dirty = true; } if (self.stub_helper_stubs_start_off == null) { - const text = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; - const sh = &text.sections.items[self.stub_helper_section_index.?]; - const data = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; - const data_data = &data.sections.items[self.data_section_index.?]; - const displacement = data_data.addr - sh.addr; - var code: [4 * @sizeOf(u32)]u8 = undefined; - mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adr(.x17, @intCast(i21, displacement)).toU32()); - mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.stp( - .x16, - .x17, - aarch64.Register.sp, - aarch64.Instruction.LoadStorePairOffset.pre_index(-16), - ).toU32()); - const dc = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; - const got = &dc.sections.items[self.data_got_section_index.?]; - const displacement2 = got.addr - sh.addr - 2 * @sizeOf(u32); - mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.ldr(.x16, .{ - .literal = @intCast(u19, displacement2 / 4), - }).toU32()); - mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.br(.x16).toU32()); - self.stub_helper_stubs_start_off = sh.offset + 4 * @sizeOf(u32); - try self.base.file.?.pwriteAll(&code, sh.offset); + const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stub_helper = &text_segment.sections.items[self.stub_helper_section_index.?]; + const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; + const data = &data_segment.sections.items[self.data_section_index.?]; + const data_const_segment = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; + const got = &data_const_segment.sections.items[self.data_got_section_index.?]; + switch (self.base.options.target.cpu.arch) { + .x86_64 => return error.TODOImplementStubHelperForX86_64, + .aarch64 => { + var code: [4 * @sizeOf(u32)]u8 = undefined; + 
{ + const displacement = data.addr - stub_helper.addr; + mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adr(.x17, @intCast(i21, displacement)).toU32()); + } + mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.stp( + .x16, + .x17, + aarch64.Register.sp, + aarch64.Instruction.LoadStorePairOffset.pre_index(-16), + ).toU32()); + { + const displacement = got.addr - stub_helper.addr - 2 * @sizeOf(u32); + mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.ldr(.x16, .{ + .literal = @intCast(u19, displacement / 4), + }).toU32()); + } + mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.br(.x16).toU32()); + self.stub_helper_stubs_start_off = stub_helper.offset + 4 * @sizeOf(u32); + try self.base.file.?.pwriteAll(&code, stub_helper.offset); + }, + else => unreachable, + } } } @@ -2460,11 +2421,73 @@ fn writeOffsetTableEntry(self: *MachO, index: usize) !void { try self.base.file.?.pwriteAll(&code, off); } +fn writeLazySymbolPointer(self: *MachO, index: u32) !void { + const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stub_helper = text_segment.sections.items[self.stub_helper_section_index.?]; + const data_segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment; + const la_symbol_ptr = data_segment.sections.items[self.la_symbol_ptr_section_index.?]; + + const stub_off = self.stub_helper_stubs_start_off.? + index * 3 * @sizeOf(u32); + const end = stub_helper.addr + stub_off - stub_helper.offset; + var buf: [@sizeOf(u64)]u8 = undefined; + mem.writeIntLittle(u64, &buf, end); + const off = la_symbol_ptr.offset + index * @sizeOf(u64); + log.debug("writing lazy symbol pointer entry 0x{x} at 0x{x}", .{ end, off }); + try self.base.file.?.pwriteAll(&buf, off); +} + +fn writeStub(self: *MachO, index: u32) !void { + const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stubs = text_segment.sections.items[self.stubs_section_index.?]; + const data_segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment; + const la_symbol_ptr = data_segment.sections.items[self.la_symbol_ptr_section_index.?]; + + const stub_off = stubs.offset + index * stubs.reserved2; + const stub_addr = stubs.addr + index * stubs.reserved2; + const la_ptr_addr = la_symbol_ptr.addr + index * @sizeOf(u64); + const displacement = la_ptr_addr - stub_addr; + log.debug("writing stub at 0x{x}", .{stub_off}); + switch (self.base.options.target.cpu.arch) { + .x86_64 => return error.TODOImplementWritingStubsForx86_64, + .aarch64 => { + var code: [2 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldr(.x16, .{ + .literal = @intCast(u19, displacement / 4), + }).toU32()); + mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.br(.x16).toU32()); + try self.base.file.?.pwriteAll(&code, stub_off); + }, + else => unreachable, + } +} + +fn writeStubInStubHelper(self: *MachO, index: u32) !void { + const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stub_helper = text_segment.sections.items[self.stub_helper_section_index.?]; + + const stub_off = self.stub_helper_stubs_start_off.? 
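// Each __stub_helper entry written by this function is three 32-bit words on
// AArch64, hence the `3 * @sizeOf(u32)` stride in `stub_off`; roughly:
//
//     ldr w16, #8        ; word 0: load the data word two instructions ahead
//     b   stub_helper    ; word 1: branch back to the shared preamble
//     .word n * 0xd      ; word 2: this symbol's offset into the lazy-bind
//                        ;         opcode stream, consumed via dyld_stub_binder
//
// The shared preamble (written in populateMissingMetadata) stashes x16/x17
// on the stack and jumps through the dyld_stub_binder GOT slot.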
+ index * 3 * @sizeOf(u32); + const end = stub_helper.addr + stub_off - stub_helper.offset; + const displacement = @intCast(i64, stub_helper.addr) - @intCast(i64, end + 4); + switch (self.base.options.target.cpu.arch) { + .x86_64 => return error.TODOImplementWritingStubsInStubHelperForx86_64, + .aarch64 => { + var code: [3 * @sizeOf(u32)]u8 = undefined; + mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldr(.w16, .{ + .literal = 0x2, + }).toU32()); + mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(@intCast(i28, displacement)).toU32()); + mem.writeIntLittle(u32, code[8..12], index * 0xd); // TODO This is the size of lazy binding opcode block. + try self.base.file.?.pwriteAll(&code, stub_off); + }, + else => unreachable, + } +} + fn relocateSymbolTable(self: *MachO) !void { const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; const nlocals = self.local_symbols.items.len; const nglobals = self.global_symbols.items.len; - const nundefs = self.undef_symbols.items.len; + const nundefs = self.extern_lazy_symbols.items().len + self.extern_nonlazy_symbols.items().len; const nsyms = nlocals + nglobals + nundefs; if (symtab.nsyms < nsyms) { @@ -2509,7 +2532,31 @@ fn writeAllGlobalAndUndefSymbols(self: *MachO) !void { const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; const nlocals = self.local_symbols.items.len; const nglobals = self.global_symbols.items.len; - const nundefs = self.undef_symbols.items.len; + + const nundefs = self.extern_lazy_symbols.items().len + self.extern_nonlazy_symbols.items().len; + var undefs = std.ArrayList(macho.nlist_64).init(self.base.allocator); + defer undefs.deinit(); + try undefs.ensureCapacity(nundefs); + for (self.extern_lazy_symbols.items()) |entry| { + const name = try self.makeString(entry.key); + undefs.appendAssumeCapacity(.{ + .n_strx = name, + .n_type = std.macho.N_UNDF | std.macho.N_EXT, + .n_sect = 0, + .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, + .n_value = 0, + }); + } + for (self.extern_nonlazy_symbols.items()) |entry| { + const name = try self.makeString(entry.key); + undefs.appendAssumeCapacity(.{ + .n_strx = name, + .n_type = std.macho.N_UNDF | std.macho.N_EXT, + .n_sect = 0, + .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, + .n_value = 0, + }); + } const locals_off = symtab.symoff; const locals_size = nlocals * @sizeOf(macho.nlist_64); @@ -2521,8 +2568,8 @@ fn writeAllGlobalAndUndefSymbols(self: *MachO) !void { const undefs_off = globals_off + globals_size; const undefs_size = nundefs * @sizeOf(macho.nlist_64); - log.debug("writing undef symbols from 0x{x} to 0x{x}", .{ undefs_off, undefs_size + undefs_off }); - try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undef_symbols.items), undefs_off); + log.debug("writing extern symbols from 0x{x} to 0x{x}", .{ undefs_off, undefs_size + undefs_off }); + try self.base.file.?.pwriteAll(mem.sliceAsBytes(undefs.items), undefs_off); // Update dynamic symbol table. const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab; @@ -2546,42 +2593,33 @@ fn writeIndirectSymbolTable(self: *MachO) !void { var buf: [@sizeOf(u32)]u8 = undefined; var off = dysymtab.indirectsymoff; - var idx: u32 = 0; stubs.reserved1 = 0; - for (self.undef_symbols.items) |sym, i| { - if (i == self.dyld_stub_binder_index.?) 
{ - continue; - } - const symtab_idx = @intCast(u32, dysymtab.iundefsym + i); + for (self.extern_lazy_symbols.items()) |entry| { + const symtab_idx = @intCast(u32, dysymtab.iundefsym + entry.value.index); mem.writeIntLittle(u32, &buf, symtab_idx); try self.base.file.?.pwriteAll(&buf, off); off += @sizeOf(u32); dysymtab.nindirectsyms += 1; - idx += 1; } - got.reserved1 = @intCast(u32, self.undef_symbols.items.len - 1); - if (self.dyld_stub_binder_index) |i| { - const symtab_idx = i + dysymtab.iundefsym; + const base_id = @intCast(u32, self.extern_lazy_symbols.items().len); + got.reserved1 = base_id; + for (self.extern_nonlazy_symbols.items()) |entry| { + const symtab_idx = @intCast(u32, dysymtab.iundefsym + entry.value.index + base_id); mem.writeIntLittle(u32, &buf, symtab_idx); try self.base.file.?.pwriteAll(&buf, off); off += @sizeOf(u32); dysymtab.nindirectsyms += 1; - idx += 1; } - la.reserved1 = got.reserved1 + 1; - for (self.undef_symbols.items) |sym, i| { - if (i == self.dyld_stub_binder_index.?) { - continue; - } - const symtab_idx = @intCast(u32, dysymtab.iundefsym + i); + la.reserved1 = got.reserved1 + @intCast(u32, self.extern_nonlazy_symbols.items().len); + for (self.extern_lazy_symbols.items()) |entry| { + const symtab_idx = @intCast(u32, dysymtab.iundefsym + entry.value.index); mem.writeIntLittle(u32, &buf, symtab_idx); try self.base.file.?.pwriteAll(&buf, off); off += @sizeOf(u32); dysymtab.nindirectsyms += 1; - idx += 1; } } @@ -2689,12 +2727,19 @@ fn writeRebaseInfoTable(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const size = try self.rebase_info_table.calcSize(); + var symbols = try self.base.allocator.alloc(*const ExternSymbol, self.extern_lazy_symbols.items().len); + defer self.base.allocator.free(symbols); + + for (self.extern_lazy_symbols.items()) |*entry, i| { + symbols[i] = &entry.value; + } + + const size = try rebaseInfoSize(symbols); var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); defer self.base.allocator.free(buffer); var stream = std.io.fixedBufferStream(buffer); - try self.rebase_info_table.write(stream.writer()); + try writeRebaseInfo(symbols, stream.writer()); const linkedit_segment = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment; const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; @@ -2720,12 +2765,19 @@ fn writeBindingInfoTable(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const size = try self.binding_info_table.calcSize(); + var symbols = try self.base.allocator.alloc(*const ExternSymbol, self.extern_nonlazy_symbols.items().len); + defer self.base.allocator.free(symbols); + + for (self.extern_nonlazy_symbols.items()) |*entry, i| { + symbols[i] = &entry.value; + } + + const size = try bindInfoSize(symbols); var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); defer self.base.allocator.free(buffer); var stream = std.io.fixedBufferStream(buffer); - try self.binding_info_table.write(stream.writer()); + try writeBindInfo(symbols, stream.writer()); const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment; const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; @@ -2748,12 +2800,19 @@ fn writeBindingInfoTable(self: *MachO) !void { fn writeLazyBindingInfoTable(self: *MachO) !void { if (!self.lazy_binding_info_dirty) return; - const size = try self.lazy_binding_info_table.calcSize(); + var symbols = try self.base.allocator.alloc(*const 
ExternSymbol, self.extern_lazy_symbols.items().len); + defer self.base.allocator.free(symbols); + + for (self.extern_lazy_symbols.items()) |*entry, i| { + symbols[i] = &entry.value; + } + + const size = try lazyBindInfoSize(symbols); var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); defer self.base.allocator.free(buffer); var stream = std.io.fixedBufferStream(buffer); - try self.lazy_binding_info_table.write(stream.writer()); + try writeLazyBindInfo(symbols, stream.writer()); const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment; const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; @@ -3001,7 +3060,7 @@ fn parseBindingInfoTable(self: *MachO) !void { assert(nread == buffer.len); var stream = std.io.fixedBufferStream(buffer); - try self.binding_info_table.read(stream.reader(), self.base.allocator); + // try self.binding_info_table.read(stream.reader(), self.base.allocator); } fn parseLazyBindingInfoTable(self: *MachO) !void { @@ -3012,5 +3071,5 @@ fn parseLazyBindingInfoTable(self: *MachO) !void { assert(nread == buffer.len); var stream = std.io.fixedBufferStream(buffer); - try self.lazy_binding_info_table.read(stream.reader(), self.base.allocator); + // try self.lazy_binding_info_table.read(stream.reader(), self.base.allocator); } diff --git a/src/link/MachO/imports.zig b/src/link/MachO/imports.zig index 6128992af3..8e0f72e1de 100644 --- a/src/link/MachO/imports.zig +++ b/src/link/MachO/imports.zig @@ -6,365 +6,177 @@ const mem = std.mem; const assert = std.debug.assert; const Allocator = mem.Allocator; -pub const RebaseInfoTable = struct { - rebase_type: u8 = macho.REBASE_TYPE_POINTER, - symbols: std.ArrayListUnmanaged(Symbol) = .{}, +pub const ExternSymbol = struct { + /// Symbol name. + /// We own the memory, therefore we'll need to free it by calling `deinit`. + /// In self-hosted, we don't expect it to be null ever. + /// However, this is for backwards compatibility with LLD when + /// we'll be patching things up post mortem. + name: ?[]u8 = null, - pub const Symbol = struct { - segment: u8, - offset: i64, - }; - - pub fn deinit(self: *RebaseInfoTable, allocator: *Allocator) void { - self.symbols.deinit(allocator); - } - - /// Write the rebase info table to byte stream. - pub fn write(self: RebaseInfoTable, writer: anytype) !void { - for (self.symbols.items) |symbol| { - try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, self.rebase_type)); - try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); - try leb.writeILEB128(writer, symbol.offset); - try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, 1)); - } - - try writer.writeByte(macho.REBASE_OPCODE_DONE); - } + /// Id of the dynamic library where the specified entries can be found. + /// Id of 0 means self. + /// TODO this should really be an id into the table of all defined + /// dylibs. + dylib_ordinal: i64 = 0, - /// Calculate size in bytes of this rebase info table. 
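// What the stream emitted by writeRebaseInfo looks like on the wire, decoded
// for a hypothetical pointer in segment 3 at offset 16 (each control byte
// carries the opcode in its high nibble and an immediate in the low nibble;
// offsets are LEB128-encoded):
//
//     0x11        REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER (1)
//     0x23 0x10   REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segment 3, offset 16
//     0x51        REBASE_OPCODE_DO_REBASE_IMM_TIMES | count 1
//     0x00        REBASE_OPCODE_DONE
//
// dyld replays these opcodes at load time, sliding each recorded pointer by
// the image's base offset.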
- pub fn calcSize(self: *RebaseInfoTable) !u64 { - var stream = std.io.countingWriter(std.io.null_writer); - var writer = stream.writer(); - var size: u64 = 0; + segment: u16 = 0, + offset: u32 = 0, + addend: ?i32 = null, + index: u32, - for (self.symbols.items) |symbol| { - size += 2; - try leb.writeILEB128(writer, symbol.offset); - size += 1; + pub fn deinit(self: *ExternSymbol, allocator: *Allocator) void { + if (self.name) |*name| { + allocator.free(name); } - - size += 1 + stream.bytes_written; - return size; } }; -/// Table of binding info entries used to tell the dyld which -/// symbols to bind at loading time. -pub const BindingInfoTable = struct { - /// Id of the dynamic library where the specified entries can be found. - dylib_ordinal: i64 = 0, - - /// Binding type; defaults to pointer type. - binding_type: u8 = macho.BIND_TYPE_POINTER, - - symbols: std.ArrayListUnmanaged(Symbol) = .{}, +pub fn rebaseInfoSize(symbols: []*const ExternSymbol) !u64 { + var stream = std.io.countingWriter(std.io.null_writer); + var writer = stream.writer(); + var size: u64 = 0; - pub const Symbol = struct { - /// Symbol name. - name: ?[]u8 = null, + for (symbols) |symbol| { + size += 2; + try leb.writeILEB128(writer, symbol.offset); + size += 1; + } - /// Id of the segment where to bind this symbol to. - segment: u8, + size += 1 + stream.bytes_written; + return size; +} - /// Offset of this symbol wrt to the segment id encoded in `segment`. - offset: i64, +pub fn writeRebaseInfo(symbols: []*const ExternSymbol, writer: anytype) !void { + for (symbols) |symbol| { + try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER)); + try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); + try leb.writeILEB128(writer, symbol.offset); + try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, 1)); + } + try writer.writeByte(macho.REBASE_OPCODE_DONE); +} - /// Addend value (if any). - addend: ?i64 = null, - }; +pub fn bindInfoSize(symbols: []*const ExternSymbol) !u64 { + var stream = std.io.countingWriter(std.io.null_writer); + var writer = stream.writer(); + var size: u64 = 0; - pub fn deinit(self: *BindingInfoTable, allocator: *Allocator) void { - for (self.symbols.items) |*symbol| { - if (symbol.name) |name| { - allocator.free(name); - } + for (symbols) |symbol| { + size += 1; + if (symbol.dylib_ordinal > 15) { + try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); } - self.symbols.deinit(allocator); - } + size += 1; - /// Parse the binding info table from byte stream. 
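// The *Size helpers share one trick: fixed one-byte opcodes are tallied by
// hand (`size += 1`), while the variable-length LEB128 fields are measured
// by writing them into a counting writer that discards the bytes. LEB128
// packs 7 payload bits per byte, so for example (using the same `leb` import
// as the rest of this file):
//
//     var cw = std.io.countingWriter(std.io.null_writer);
//     try leb.writeILEB128(cw.writer(), @as(i64, 16384)); // 2^14
//     std.debug.assert(cw.bytes_written == 3); // encodes as 0x80 0x80 0x01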
- pub fn read(self: *BindingInfoTable, reader: anytype, allocator: *Allocator) !void { - var symbol: Symbol = .{ - .segment = 0, - .offset = 0, - }; + if (symbol.name) |name| { + size += 1; + size += name.len; + size += 1; + } - var dylib_ordinal_set = false; - var done = false; - while (true) { - const inst = reader.readByte() catch |err| switch (err) { - error.EndOfStream => break, - else => return err, - }; - const imm: u8 = inst & macho.BIND_IMMEDIATE_MASK; - const opcode: u8 = inst & macho.BIND_OPCODE_MASK; + size += 1; + try leb.writeILEB128(writer, symbol.offset); - switch (opcode) { - macho.BIND_OPCODE_DO_BIND => { - try self.symbols.append(allocator, symbol); - symbol = .{ - .segment = 0, - .offset = 0, - }; - }, - macho.BIND_OPCODE_DONE => { - done = true; - break; - }, - macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => { - var name = std.ArrayList(u8).init(allocator); - var next = try reader.readByte(); - while (next != @as(u8, 0)) { - try name.append(next); - next = try reader.readByte(); - } - symbol.name = name.toOwnedSlice(); - }, - macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => { - symbol.segment = imm; - symbol.offset = try leb.readILEB128(i64, reader); - }, - macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM, macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM => { - assert(!dylib_ordinal_set); - self.dylib_ordinal = imm; - }, - macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB => { - assert(!dylib_ordinal_set); - self.dylib_ordinal = try leb.readILEB128(i64, reader); - }, - macho.BIND_OPCODE_SET_TYPE_IMM => { - self.binding_type = imm; - }, - macho.BIND_OPCODE_SET_ADDEND_SLEB => { - symbol.addend = try leb.readILEB128(i64, reader); - }, - else => { - std.log.warn("unhandled BIND_OPCODE_: 0x{x}", .{opcode}); - }, - } + if (symbol.addend) |addend| { + size += 1; + try leb.writeILEB128(writer, addend); } - assert(done); + + size += 2; } - /// Write the binding info table to byte stream. - pub fn write(self: BindingInfoTable, writer: anytype) !void { - if (self.dylib_ordinal > 15) { + size += stream.bytes_written; + return size; +} + +pub fn writeBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { + for (symbols) |symbol| { + if (symbol.dylib_ordinal > 15) { try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); - try leb.writeULEB128(writer, @bitCast(u64, self.dylib_ordinal)); - } else if (self.dylib_ordinal > 0) { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, self.dylib_ordinal))); + try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); + } else if (symbol.dylib_ordinal > 0) { + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); } else { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, self.dylib_ordinal))); + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); } - try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, self.binding_type)); - - for (self.symbols.items) |symbol| { - if (symbol.name) |name| { - try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. 
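// The eager (non-lazy) bind stream, decoded for the one entry this linker
// currently produces: dyld_stub_binder from dylib ordinal 1, bound into a
// GOT slot at offset 0 (segment 2 below is a stand-in; the real index comes
// from data_const_segment_cmd_index):
//
//     0x11                       BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal 1
//     0x51                       BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER (1)
//     0x40 "dyld_stub_binder\0"  BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | flags 0
//     0x72 0x00                  BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segment 2, offset 0
//     0x90                       BIND_OPCODE_DO_BIND
//     0x00                       BIND_OPCODE_DONE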
- try writer.writeAll(name); - try writer.writeByte(0); - } + try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER)); - try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); - try leb.writeILEB128(writer, symbol.offset); + if (symbol.name) |name| { + try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. + try writer.writeAll(name); + try writer.writeByte(0); + } - if (symbol.addend) |addend| { - try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB); - try leb.writeILEB128(writer, addend); - } + try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); + try leb.writeILEB128(writer, symbol.offset); - try writer.writeByte(macho.BIND_OPCODE_DO_BIND); + if (symbol.addend) |addend| { + try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB); + try leb.writeILEB128(writer, addend); } + try writer.writeByte(macho.BIND_OPCODE_DO_BIND); try writer.writeByte(macho.BIND_OPCODE_DONE); } +} - /// Calculate size in bytes of this binding info table. - pub fn calcSize(self: *BindingInfoTable) !u64 { - var stream = std.io.countingWriter(std.io.null_writer); - var writer = stream.writer(); - var size: u64 = 1; - - if (self.dylib_ordinal > 15) { - try leb.writeULEB128(writer, @bitCast(u64, self.dylib_ordinal)); - } +pub fn lazyBindInfoSize(symbols: []*const ExternSymbol) !u64 { + var stream = std.io.countingWriter(std.io.null_writer); + var writer = stream.writer(); + var size: u64 = 0; + for (symbols) |symbol| { size += 1; + try leb.writeILEB128(writer, symbol.offset); - for (self.symbols.items) |symbol| { - if (symbol.name) |name| { - size += 1; - size += name.len; - size += 1; - } - - size += 1; - try leb.writeILEB128(writer, symbol.offset); - - if (symbol.addend) |addend| { - size += 1; - try leb.writeILEB128(writer, addend); - } - + if (symbol.addend) |addend| { size += 1; + try leb.writeILEB128(writer, addend); } - size += 1 + stream.bytes_written; - return size; - } -}; - -/// Table of lazy binding info entries used to tell the dyld which -/// symbols to lazily bind at first load of a dylib. -pub const LazyBindingInfoTable = struct { - symbols: std.ArrayListUnmanaged(Symbol) = .{}, - - pub const Symbol = struct { - /// Symbol name. - name: ?[]u8 = null, - - /// Offset of this symbol wrt to the segment id encoded in `segment`. - offset: i64, - - /// Id of the dylib where this symbol is expected to reside. - /// Positive ordinals point at dylibs imported with LC_LOAD_DYLIB, - /// 0 means this binary, -1 the main executable, and -2 flat lookup. - dylib_ordinal: i64, - - /// Id of the segment where to bind this symbol to. - segment: u8, - - /// Addend value (if any). - addend: ?i64 = null, - }; - - pub fn deinit(self: *LazyBindingInfoTable, allocator: *Allocator) void { - for (self.symbols.items) |*symbol| { - if (symbol.name) |name| { - allocator.free(name); - } + size += 1; + if (symbol.dylib_ordinal > 15) { + try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); } - self.symbols.deinit(allocator); - } - - /// Parse the binding info table from byte stream. 
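// Both bind writers choose among three encodings for the dylib ordinal
// depending on its range; factored out as a sketch (not part of the patch),
// the decision is:
//
//     fn writeDylibOrdinal(writer: anytype, ordinal: i64) !void {
//         if (ordinal > 15) {
//             // Does not fit the 4-bit immediate; ULEB128 payload follows.
//             try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
//             try leb.writeULEB128(writer, @bitCast(u64, ordinal));
//         } else if (ordinal > 0) {
//             try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, ordinal)));
//         } else {
//             // Special ordinals: 0 = this image, -1 = main executable, -2 = flat lookup.
//             try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, ordinal)));
//         }
//     }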
- pub fn read(self: *LazyBindingInfoTable, reader: anytype, allocator: *Allocator) !void { - var symbol: Symbol = .{ - .offset = 0, - .segment = 0, - .dylib_ordinal = 0, - }; - - var done = false; - while (true) { - const inst = reader.readByte() catch |err| switch (err) { - error.EndOfStream => break, - else => return err, - }; - const imm: u8 = inst & macho.BIND_IMMEDIATE_MASK; - const opcode: u8 = inst & macho.BIND_OPCODE_MASK; - - switch (opcode) { - macho.BIND_OPCODE_DO_BIND => { - try self.symbols.append(allocator, symbol); - }, - macho.BIND_OPCODE_DONE => { - done = true; - symbol = .{ - .offset = 0, - .segment = 0, - .dylib_ordinal = 0, - }; - }, - macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => { - var name = std.ArrayList(u8).init(allocator); - var next = try reader.readByte(); - while (next != @as(u8, 0)) { - try name.append(next); - next = try reader.readByte(); - } - symbol.name = name.toOwnedSlice(); - }, - macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => { - symbol.segment = imm; - symbol.offset = try leb.readILEB128(i64, reader); - }, - macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM, macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM => { - symbol.dylib_ordinal = imm; - }, - macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB => { - symbol.dylib_ordinal = try leb.readILEB128(i64, reader); - }, - macho.BIND_OPCODE_SET_ADDEND_SLEB => { - symbol.addend = try leb.readILEB128(i64, reader); - }, - else => { - std.log.warn("unhandled BIND_OPCODE_: 0x{x}", .{opcode}); - }, - } + if (symbol.name) |name| { + size += 1; + size += name.len; + size += 1; } - assert(done); + size += 2; } - /// Write the binding info table to byte stream. - pub fn write(self: LazyBindingInfoTable, writer: anytype) !void { - for (self.symbols.items) |symbol| { - try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); - try leb.writeILEB128(writer, symbol.offset); - - if (symbol.addend) |addend| { - try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB); - try leb.writeILEB128(writer, addend); - } + size += stream.bytes_written; + return size; +} - if (symbol.dylib_ordinal > 15) { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); - try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); - } else if (symbol.dylib_ordinal > 0) { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); - } else { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); - } +pub fn writeLazyBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { + for (symbols) |symbol| { + try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); + try leb.writeILEB128(writer, symbol.offset); - if (symbol.name) |name| { - try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. - try writer.writeAll(name); - try writer.writeByte(0); - } - - try writer.writeByte(macho.BIND_OPCODE_DO_BIND); - try writer.writeByte(macho.BIND_OPCODE_DONE); + if (symbol.addend) |addend| { + try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB); + try leb.writeILEB128(writer, addend); } - } - /// Calculate size in bytes of this binding info table. 
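// Unlike the eager stream, the lazy-bind stream is a catalog: each symbol
// gets a self-contained record terminated by DO_BIND and DONE, and every
// __stub_helper entry pushes the byte offset of its own record. The
// hardcoded `index * 0xd` in the stub-helper writers appears to be the
// record size for the single import exercised today, "_write":
//
//     1   SET_SEGMENT_AND_OFFSET_ULEB control byte
//     1   segment offset as ULEB128 (one byte while small)
//     1   SET_DYLIB_ORDINAL_IMM control byte
//     1   SET_SYMBOL_TRAILING_FLAGS_IMM control byte
//     7   "_write" plus NUL terminator
//     1   DO_BIND
//     1   DONE
//         total: 13 bytes (0xd)
//
// which only holds while every imported name is six characters long,
// presumably what the TODO next to that constant is about.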
- pub fn calcSize(self: *LazyBindingInfoTable) !u64 { - var stream = std.io.countingWriter(std.io.null_writer); - var writer = stream.writer(); - var size: u64 = 0; - - for (self.symbols.items) |symbol| { - size += 1; - try leb.writeILEB128(writer, symbol.offset); - - if (symbol.addend) |addend| { - size += 1; - try leb.writeILEB128(writer, addend); - } + if (symbol.dylib_ordinal > 15) { + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); + try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); + } else if (symbol.dylib_ordinal > 0) { + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); + } else { + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); + } - size += 1; - if (symbol.dylib_ordinal > 15) { - try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); - } - if (symbol.name) |name| { - size += 1; - size += name.len; - size += 1; - } - size += 2; + if (symbol.name) |name| { + try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. + try writer.writeAll(name); + try writer.writeByte(0); } - size += stream.bytes_written; - return size; + try writer.writeByte(macho.BIND_OPCODE_DO_BIND); + try writer.writeByte(macho.BIND_OPCODE_DONE); } -}; +} -- cgit v1.2.3 From 7d40aaad2b514703995458909b315f222543c4cd Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 9 Jan 2021 20:38:55 +0100 Subject: macho: document more code + add test case --- src/codegen.zig | 23 ++++++++--------- src/link/MachO.zig | 62 +++++++++++++++++++++++++++++----------------- src/link/MachO/imports.zig | 31 ++++------------------- test/stage2/aarch64.zig | 17 +++++++++++++ 4 files changed, 71 insertions(+), 62 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index ad4215191f..05898c77c8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1865,19 +1865,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // If it doesn't, it will get autofreed when we clean up the extern symbol table. const decl_name = try std.fmt.allocPrint(self.bin_file.allocator, "_{s}", .{decl.name}); const already_defined = macho_file.extern_lazy_symbols.contains(decl_name); - const symbol: u32 = blk: { - if (macho_file.extern_lazy_symbols.get(decl_name)) |sym| { - self.bin_file.allocator.free(decl_name); - break :blk sym.index; - } else { - const index = @intCast(u32, macho_file.extern_lazy_symbols.items().len); - try macho_file.extern_lazy_symbols.putNoClobber(self.bin_file.allocator, decl_name, .{ - .name = decl_name, - .dylib_ordinal = 1, // TODO this is now hardcoded, since we only support libSystem. - .index = index, - }); - break :blk index; - } + const symbol: u32 = if (macho_file.extern_lazy_symbols.getIndex(decl_name)) |index| blk: { + self.bin_file.allocator.free(decl_name); + break :blk @intCast(u32, index); + } else blk: { + const index = @intCast(u32, macho_file.extern_lazy_symbols.items().len); + try macho_file.extern_lazy_symbols.putNoClobber(self.bin_file.allocator, decl_name, .{ + .name = decl_name, + .dylib_ordinal = 1, // TODO this is now hardcoded, since we only support libSystem. 
+ }); + break :blk index; }; try macho_file.stub_fixups.append(self.bin_file.allocator, .{ .symbol = symbol, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 7d86c27aa9..c3808911eb 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -159,19 +159,29 @@ last_text_block: ?*TextBlock = null, /// prior to calling `generateSymbol`, and then immediately deallocated /// rather than sitting in the global scope. pie_fixups: std.ArrayListUnmanaged(PieFixup) = .{}, - +/// A list of all stub (extern decls) fixups required for this run of the linker. +/// Warning, this is currently NOT thread-safe. See the TODO below. +/// TODO Move this list inside `updateDecl` where it should be allocated +/// prior to calling `generateSymbol`, and then immediately deallocated +/// rather than sitting in the global scope. stub_fixups: std.ArrayListUnmanaged(StubFixup) = .{}, -pub const StubFixup = struct { - symbol: u32, - already_defined: bool, +pub const PieFixup = struct { + /// Target address we wanted to address in absolute terms. + address: u64, + /// Where in the byte stream we should perform the fixup. start: usize, + /// The length of the byte stream. For x86_64, this will be + /// variable. For aarch64, it will be fixed at 4 bytes. len: usize, }; -pub const PieFixup = struct { - /// Target address we wanted to address in absolute terms. - address: u64, +pub const StubFixup = struct { + /// Id of extern (lazy) symbol. + symbol: u32, + /// Signals whether the symbol has already been declared before. If so, + /// then there is no need to rewrite the stub entry and related. + already_defined: bool, /// Where in the byte stream we should perform the fixup. start: usize, /// The length of the byte stream. For x86_64, this will be @@ -1030,13 +1040,20 @@ pub fn deinit(self: *MachO) void { if (self.d_sym) |*ds| { ds.deinit(self.base.allocator); } + for (self.extern_lazy_symbols.items()) |*entry| { + entry.value.deinit(self.base.allocator); + } + self.extern_lazy_symbols.deinit(self.base.allocator); + for (self.extern_nonlazy_symbols.items()) |*entry| { + entry.value.deinit(self.base.allocator); + } + self.extern_nonlazy_symbols.deinit(self.base.allocator); self.pie_fixups.deinit(self.base.allocator); + self.stub_fixups.deinit(self.base.allocator); self.text_block_free_list.deinit(self.base.allocator); self.offset_table.deinit(self.base.allocator); self.offset_table_free_list.deinit(self.base.allocator); self.string_table.deinit(self.base.allocator); - self.extern_lazy_symbols.deinit(self.base.allocator); - self.extern_nonlazy_symbols.deinit(self.base.allocator); self.global_symbols.deinit(self.base.allocator); self.global_symbol_free_list.deinit(self.base.allocator); self.local_symbols.deinit(self.base.allocator); @@ -2047,7 +2064,6 @@ pub fn populateMissingMetadata(self: *MachO) !void { try self.extern_nonlazy_symbols.putNoClobber(self.base.allocator, name, .{ .name = name, .dylib_ordinal = 1, // TODO this is currently hardcoded. 
- .index = index, .segment = self.data_const_segment_cmd_index.?, .offset = index * @sizeOf(u64), }); @@ -2582,12 +2598,12 @@ fn writeAllGlobalAndUndefSymbols(self: *MachO) !void { } fn writeIndirectSymbolTable(self: *MachO) !void { - const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; - const stubs = &text_seg.sections.items[self.stubs_section_index.?]; - const dc_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; - const got = &dc_seg.sections.items[self.data_got_section_index.?]; - const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; - const la = &data_seg.sections.items[self.la_symbol_ptr_section_index.?]; + const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stubs = &text_segment.sections.items[self.stubs_section_index.?]; + const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; + const got = &data_const_seg.sections.items[self.data_got_section_index.?]; + const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; + const la_symbol_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?]; const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab; dysymtab.nindirectsyms = 0; @@ -2595,8 +2611,8 @@ fn writeIndirectSymbolTable(self: *MachO) !void { var off = dysymtab.indirectsymoff; stubs.reserved1 = 0; - for (self.extern_lazy_symbols.items()) |entry| { - const symtab_idx = @intCast(u32, dysymtab.iundefsym + entry.value.index); + for (self.extern_lazy_symbols.items()) |_, i| { + const symtab_idx = @intCast(u32, dysymtab.iundefsym + i); mem.writeIntLittle(u32, &buf, symtab_idx); try self.base.file.?.pwriteAll(&buf, off); off += @sizeOf(u32); @@ -2605,17 +2621,17 @@ fn writeIndirectSymbolTable(self: *MachO) !void { const base_id = @intCast(u32, self.extern_lazy_symbols.items().len); got.reserved1 = base_id; - for (self.extern_nonlazy_symbols.items()) |entry| { - const symtab_idx = @intCast(u32, dysymtab.iundefsym + entry.value.index + base_id); + for (self.extern_nonlazy_symbols.items()) |_, i| { + const symtab_idx = @intCast(u32, dysymtab.iundefsym + i + base_id); mem.writeIntLittle(u32, &buf, symtab_idx); try self.base.file.?.pwriteAll(&buf, off); off += @sizeOf(u32); dysymtab.nindirectsyms += 1; } - la.reserved1 = got.reserved1 + @intCast(u32, self.extern_nonlazy_symbols.items().len); - for (self.extern_lazy_symbols.items()) |entry| { - const symtab_idx = @intCast(u32, dysymtab.iundefsym + entry.value.index); + la_symbol_ptr.reserved1 = got.reserved1 + @intCast(u32, self.extern_nonlazy_symbols.items().len); + for (self.extern_lazy_symbols.items()) |_, i| { + const symtab_idx = @intCast(u32, dysymtab.iundefsym + i); mem.writeIntLittle(u32, &buf, symtab_idx); try self.base.file.?.pwriteAll(&buf, off); off += @sizeOf(u32); diff --git a/src/link/MachO/imports.zig b/src/link/MachO/imports.zig index 8e0f72e1de..c5f6211f1a 100644 --- a/src/link/MachO/imports.zig +++ b/src/link/MachO/imports.zig @@ -20,13 +20,15 @@ pub const ExternSymbol = struct { /// dylibs. dylib_ordinal: i64 = 0, + /// Id of the segment where this symbol is defined (will have its address + /// resolved). segment: u16 = 0, + + /// Offset relative to the start address of the `segment`. 
offset: u32 = 0, - addend: ?i32 = null, - index: u32, pub fn deinit(self: *ExternSymbol, allocator: *Allocator) void { - if (self.name) |*name| { + if (self.name) |name| { allocator.free(name); } } @@ -77,12 +79,6 @@ pub fn bindInfoSize(symbols: []*const ExternSymbol) !u64 { size += 1; try leb.writeILEB128(writer, symbol.offset); - - if (symbol.addend) |addend| { - size += 1; - try leb.writeILEB128(writer, addend); - } - size += 2; } @@ -110,12 +106,6 @@ pub fn writeBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); try leb.writeILEB128(writer, symbol.offset); - - if (symbol.addend) |addend| { - try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB); - try leb.writeILEB128(writer, addend); - } - try writer.writeByte(macho.BIND_OPCODE_DO_BIND); try writer.writeByte(macho.BIND_OPCODE_DONE); } @@ -129,12 +119,6 @@ pub fn lazyBindInfoSize(symbols: []*const ExternSymbol) !u64 { for (symbols) |symbol| { size += 1; try leb.writeILEB128(writer, symbol.offset); - - if (symbol.addend) |addend| { - size += 1; - try leb.writeILEB128(writer, addend); - } - size += 1; if (symbol.dylib_ordinal > 15) { try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); @@ -156,11 +140,6 @@ pub fn writeLazyBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); try leb.writeILEB128(writer, symbol.offset); - if (symbol.addend) |addend| { - try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB); - try leb.writeILEB128(writer, addend); - } - if (symbol.dylib_ordinal > 15) { try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); diff --git a/test/stage2/aarch64.zig b/test/stage2/aarch64.zig index 7d05c60bb8..1dde30e969 100644 --- a/test/stage2/aarch64.zig +++ b/test/stage2/aarch64.zig @@ -199,4 +199,21 @@ pub fn addCases(ctx: *TestContext) !void { "", ); } + + { + var case = ctx.exe("hello world linked to libc", macos_aarch64); + + // TODO rewrite this test once we handle more int conversions and return args. 
+ case.addCompareOutput( + \\extern "c" fn write(usize, usize, usize) void; + \\extern "c" fn exit(usize) noreturn; + \\ + \\export fn _start() noreturn { + \\ write(1, @ptrToInt("Hello, World!\n"), 14); + \\ exit(0); + \\} + , + "Hello, World!\n", + ); + } } -- cgit v1.2.3 From f0d7ec6f33634edb0ddb3ba5d5b306e9f2de5418 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 10 Jan 2021 00:21:34 +0100 Subject: macho: add x86_64 support --- src/codegen.zig | 22 +++++++++++-- src/link/MachO.zig | 91 ++++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 93 insertions(+), 20 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 05898c77c8..bfb1540e40 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1876,14 +1876,30 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); break :blk index; }; + const start = self.code.items.len; + const len: usize = blk: { + switch (arch) { + .x86_64 => { + // callq + try self.code.ensureCapacity(self.code.items.len + 5); + self.code.appendSliceAssumeCapacity(&[5]u8{ 0xe8, 0x0, 0x0, 0x0, 0x0 }); + break :blk 5; + }, + .aarch64 => { + // bl + writeInt(u32, try self.code.addManyAsArray(4), 0); + break :blk 4; + }, + else => unreachable, // unsupported architecture on MachO + } + }; try macho_file.stub_fixups.append(self.bin_file.allocator, .{ .symbol = symbol, .already_defined = already_defined, - .start = self.code.items.len, - .len = 4, + .start = start, + .len = len, }); // We mark the space and fix it up later. - writeInt(u32, try self.code.addManyAsArray(4), 0); } else { return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 3839158f70..fed3ee7836 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1241,14 +1241,18 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { for (self.stub_fixups.items) |fixup| { const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; const text_addr = symbol.n_value + fixup.start; - const displacement = @intCast(u32, stub_addr - text_addr); - var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; switch (self.base.options.target.cpu.arch) { - .x86_64 => return error.TODOImplementStubFixupsForx86_64, + .x86_64 => { + const displacement = @intCast(u32, stub_addr - text_addr - fixup.len); + var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; + mem.writeIntSliceLittle(u32, placeholder, displacement); + }, .aarch64 => { + const displacement = @intCast(u32, stub_addr - text_addr); + var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(@intCast(i28, displacement)).toU32()); }, - else => unreachable, + else => unreachable, // unsupported target architecture } if (!fixup.already_defined) { try self.writeStub(fixup.symbol); @@ -1565,6 +1569,11 @@ pub fn populateMissingMetadata(self: *MachO) !void { .aarch64 => 2, else => unreachable, // unhandled architecture type }; + const stub_size: u4 = switch (self.base.options.target.cpu.arch) { + .x86_64 => 6, + .aarch64 => 2 * @sizeOf(u32), + else => unreachable, // unhandled architecture type + }; const flags = macho.S_SYMBOL_STUBS | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS; const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const off = text_segment.findFreeSpace(needed_size, @alignOf(u64), self.header_pad); @@ -1583,7 +1592,7 
@@ pub fn populateMissingMetadata(self: *MachO) !void { .nreloc = 0, .flags = flags, .reserved1 = 0, - .reserved2 = 2 * @sizeOf(u32), + .reserved2 = stub_size, .reserved3 = 0, }); self.header_dirty = true; @@ -2044,7 +2053,30 @@ pub fn populateMissingMetadata(self: *MachO) !void { const data_const_segment = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; const got = &data_const_segment.sections.items[self.data_got_section_index.?]; switch (self.base.options.target.cpu.arch) { - .x86_64 => return error.TODOImplementStubHelperForX86_64, + .x86_64 => { + const code_size = 15; + var code: [code_size]u8 = undefined; + // lea %r11, [rip + disp] + code[0] = 0x4c; + code[1] = 0x8d; + code[2] = 0x1d; + { + const displacement = @intCast(u32, data.addr - stub_helper.addr - 7); + mem.writeIntLittle(u32, code[3..7], displacement); + } + // push %r11 + code[7] = 0x41; + code[8] = 0x53; + // jmp [rip + disp] + code[9] = 0xff; + code[10] = 0x25; + { + const displacement = @intCast(u32, got.addr - stub_helper.addr - code_size); + mem.writeIntLittle(u32, code[11..], displacement); + } + self.stub_helper_stubs_start_off = stub_helper.offset + code_size; + try self.base.file.?.pwriteAll(&code, stub_helper.offset); + }, .aarch64 => { var code: [4 * @sizeOf(u32)]u8 = undefined; { @@ -2410,7 +2442,12 @@ fn writeLazySymbolPointer(self: *MachO, index: u32) !void { const data_segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment; const la_symbol_ptr = data_segment.sections.items[self.la_symbol_ptr_section_index.?]; - const stub_off = self.stub_helper_stubs_start_off.? + index * 3 * @sizeOf(u32); + const stub_size: u4 = switch (self.base.options.target.cpu.arch) { + .x86_64 => 10, + .aarch64 => 3 * @sizeOf(u32), + else => unreachable, + }; + const stub_off = self.stub_helper_stubs_start_off.? + index * stub_size; const end = stub_helper.addr + stub_off - stub_helper.offset; var buf: [@sizeOf(u64)]u8 = undefined; mem.writeIntLittle(u64, &buf, end); @@ -2428,42 +2465,62 @@ fn writeStub(self: *MachO, index: u32) !void { const stub_off = stubs.offset + index * stubs.reserved2; const stub_addr = stubs.addr + index * stubs.reserved2; const la_ptr_addr = la_symbol_ptr.addr + index * @sizeOf(u64); - const displacement = la_ptr_addr - stub_addr; log.debug("writing stub at 0x{x}", .{stub_off}); + var code = try self.base.allocator.alloc(u8, stubs.reserved2); + defer self.base.allocator.free(code); switch (self.base.options.target.cpu.arch) { - .x86_64 => return error.TODOImplementWritingStubsForx86_64, + .x86_64 => { + const displacement = @intCast(u32, la_ptr_addr - stub_addr - stubs.reserved2); + // jmp + code[0] = 0xff; + code[1] = 0x25; + mem.writeIntLittle(u32, code[2..][0..4], displacement); + }, .aarch64 => { - var code: [2 * @sizeOf(u32)]u8 = undefined; + const displacement = la_ptr_addr - stub_addr; mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldr(.x16, .{ .literal = @intCast(u19, displacement / 4), }).toU32()); mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.br(.x16).toU32()); - try self.base.file.?.pwriteAll(&code, stub_off); }, else => unreachable, } + try self.base.file.?.pwriteAll(code, stub_off); } fn writeStubInStubHelper(self: *MachO, index: u32) !void { const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; const stub_helper = text_segment.sections.items[self.stub_helper_section_index.?]; - const stub_off = self.stub_helper_stubs_start_off.? 
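// The raw x86_64 byte sequences above, decoded. All displacements are
// RIP-relative and counted from the end of the instruction, hence the `- 7`,
// `- code_size`, and `- stubs.reserved2` adjustments:
//
//     4c 8d 1d <disp32>   lea r11, [rip + disp]  ; r11 := __data address
//     41 53               push r11
//     ff 25 <disp32>      jmp [rip + disp]       ; through dyld_stub_binder's GOT slot
//
// and each 6-byte stub emitted by writeStub is a single indirect jump
// through its __la_symbol_ptr slot:
//
//     ff 25 <disp32>      jmp [rip + disp]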
+ index * 3 * @sizeOf(u32); - const end = stub_helper.addr + stub_off - stub_helper.offset; - const displacement = @intCast(i64, stub_helper.addr) - @intCast(i64, end + 4); + const stub_size: u4 = switch (self.base.options.target.cpu.arch) { + .x86_64 => 10, + .aarch64 => 3 * @sizeOf(u32), + else => unreachable, + }; + const stub_off = self.stub_helper_stubs_start_off.? + index * stub_size; + var code = try self.base.allocator.alloc(u8, stub_size); + defer self.base.allocator.free(code); switch (self.base.options.target.cpu.arch) { - .x86_64 => return error.TODOImplementWritingStubsInStubHelperForx86_64, + .x86_64 => { + const displacement = @intCast(i32, @intCast(i64, stub_helper.offset) - @intCast(i64, stub_off) - stub_size); + // pushq + code[0] = 0x68; + mem.writeIntLittle(u32, code[1..][0..4], index * 0xd); // TODO + // jmpq + code[5] = 0xe9; + mem.writeIntLittle(u32, code[6..][0..4], @bitCast(u32, displacement)); + }, .aarch64 => { - var code: [3 * @sizeOf(u32)]u8 = undefined; + const displacement = @intCast(i64, stub_helper.offset) - @intCast(i64, stub_off) - 4; mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldr(.w16, .{ .literal = 0x2, }).toU32()); mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(@intCast(i28, displacement)).toU32()); mem.writeIntLittle(u32, code[8..12], index * 0xd); // TODO This is the size of lazy binding opcode block. - try self.base.file.?.pwriteAll(&code, stub_off); }, else => unreachable, } + try self.base.file.?.pwriteAll(code, stub_off); } fn relocateSymbolTable(self: *MachO) !void { -- cgit v1.2.3 From fbd5fbe729b7d3f085d2d479ed9957decc019332 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 16 Jan 2021 14:15:40 +0100 Subject: stage2 AArch64: add very basic return values --- src/codegen.zig | 20 +++++++++++++++++--- src/codegen/aarch64.zig | 27 ++++++++++++++------------- 2 files changed, 31 insertions(+), 16 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index bfb1540e40..7c67a9191b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2935,8 +2935,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { ).toU32()); // ldr x28, [sp], #16 mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.x28, .{ - .rn = Register.sp, - .offset = Instruction.LoadStoreOffset.imm_post_index(16), + .register = .{ + .rn = Register.sp, + .offset = Instruction.LoadStoreOffset.imm_post_index(16), + }, }).toU32()); } else { // stp x0, x28, [sp, #-16] @@ -2978,7 +2980,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
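// The LdrArgs change in this commit replaces the old "optional rn, optional
// literal" struct with a tagged union, so the two addressing forms of ldr
// can no longer be mixed up at a call site. Both forms, using only shapes
// that already appear in this patch:
//
//     // base-register form: ldr x2, [x1]
//     _ = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1 } });
//     // PC-relative literal form; the u19 counts 4-byte words,
//     // so this loads from pc + 16
//     _ = Instruction.ldr(.x2, .{ .literal = 4 });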
try self.genSetReg(src, reg, .{ .immediate = addr }); - mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .rn = reg }).toU32()); + mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{ .rn = reg } }).toU32()); } }, else => return self.fail(src, "TODO implement genSetReg for aarch64 {}", .{mcv}), @@ -3620,6 +3622,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail(src, "TODO implement function return values for {}", .{cc}), }, + .aarch64 => switch (cc) { + .Naked => unreachable, + .Unspecified, .C => { + const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + if (ret_ty_size <= 8) { + result.return_value = .{ .register = c_abi_int_return_regs[0] }; + } else { + return self.fail(src, "TODO support more return types for ARM backend", .{}); + } + }, + else => return self.fail(src, "TODO implement function return values for {}", .{cc}), + }, else => return self.fail(src, "TODO implement codegen return values for {}", .{self.target.cpu.arch}), } return result; diff --git a/src/codegen/aarch64.zig b/src/codegen/aarch64.zig index 50cdf6a262..5fba1ea7e1 100644 --- a/src/codegen/aarch64.zig +++ b/src/codegen/aarch64.zig @@ -64,7 +64,7 @@ pub const callee_preserved_regs = [_]Register{ }; pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 }; -pub const c_abi_int_return_regs = [_]Register{ .x0, .x1 }; +pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 }; test "Register.id" { testing.expectEqual(@as(u5, 0), Register.x0.id()); @@ -699,17 +699,18 @@ pub const Instruction = union(enum) { // Load or store register - pub const LdrArgs = struct { - rn: ?Register = null, - offset: LoadStoreOffset = LoadStoreOffset.none, - literal: ?u19 = null, + pub const LdrArgs = union(enum) { + register: struct { + rn: Register, + offset: LoadStoreOffset = LoadStoreOffset.none, + }, + literal: u19, }; pub fn ldr(rt: Register, args: LdrArgs) Instruction { - if (args.rn) |rn| { - return loadStoreRegister(rt, rn, args.offset, true); - } else { - return loadLiteral(rt, args.literal.?); + switch (args) { + .register => |info| return loadStoreRegister(rt, info.rn, info.offset, true), + .literal => |literal| return loadLiteral(rt, literal), } } @@ -911,19 +912,19 @@ test "serialize instructions" { .expected = 0b1_00101_00_0000_0000_0000_0000_0000_0100, }, .{ // ldr x2, [x1] - .inst = Instruction.ldr(.x2, .{ .rn = .x1 }), + .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1 } }), .expected = 0b11_111_0_01_01_000000000000_00001_00010, }, .{ // ldr x2, [x1, #1]! 
- .inst = Instruction.ldr(.x2, .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_pre_index(1) }), + .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_pre_index(1) } }), .expected = 0b11_111_0_00_01_0_000000001_11_00001_00010, }, .{ // ldr x2, [x1], #-1 - .inst = Instruction.ldr(.x2, .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_post_index(-1) }), + .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_post_index(-1) } }), .expected = 0b11_111_0_00_01_0_111111111_01_00001_00010, }, .{ // ldr x2, [x1], (x3) - .inst = Instruction.ldr(.x2, .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.reg(.x3) }), + .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.reg(.x3) } }), .expected = 0b11_111_0_00_01_1_00011_011_0_10_00001_00010, }, .{ // ldr x2, label -- cgit v1.2.3 From d2a297c2b3eb293b393f41640892ff7a5a71027f Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 10 Jan 2021 20:50:38 +0100 Subject: stage2 ARM: add extra load/store instructions --- src/codegen.zig | 76 ++++++++++++++++++++++++++------------- src/codegen/arm.zig | 100 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+), 25 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 7c67a9191b..709c91a635 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2630,21 +2630,34 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |reg| { const abi_size = ty.abiSize(self.target.*); const adj_off = stack_offset + abi_size; - const offset = if (adj_off <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); switch (abi_size) { - 1 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.strb(.al, reg, .fp, .{ - .offset = offset, - .positive = false, - }).toU32()), - 2 => return self.fail(src, "TODO implement strh", .{}), - 4 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.str(.al, reg, .fp, .{ - .offset = offset, - .positive = false, - }).toU32()), - else => return self.fail(src, "TODO a type of size {} is not allowed in a register", .{abi_size}), + 1, 4 => { + const offset = if (adj_off <= math.maxInt(u12)) blk: { + break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); + } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + const str = switch (abi_size) { + 1 => Instruction.strb, + 4 => Instruction.str, + else => unreachable, + }; + + writeInt(u32, try self.code.addManyAsArray(4), str(.al, reg, .fp, .{ + .offset = offset, + .positive = false, + }).toU32()); + }, + 2 => { + const offset = if (adj_off <= math.maxInt(u8)) blk: { + break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + + writeInt(u32, try self.code.addManyAsArray(4), Instruction.strh(.al, reg, .fp, .{ + .offset = offset, + .positive = false, + }).toU32()); + }, + else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}), } }, .memory => |vaddr| { @@ -2836,20 +2849,33 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // const abi_size = ty.abiSize(self.target.*); const abi_size = 4; const adj_off = unadjusted_off + abi_size; - const offset = if 
(adj_off <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); switch (abi_size) { - 1 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrb(.al, reg, .fp, .{ - .offset = offset, - .positive = false, - }).toU32()), - 2 => return self.fail(src, "TODO implement ldrh", .{}), - 4 => writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, .fp, .{ - .offset = offset, - .positive = false, - }).toU32()), + 1, 4 => { + const offset = if (adj_off <= math.maxInt(u12)) blk: { + break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); + } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + const ldr = switch (abi_size) { + 1 => Instruction.ldrb, + 4 => Instruction.ldr, + else => unreachable, + }; + + writeInt(u32, try self.code.addManyAsArray(4), ldr(.al, reg, .fp, .{ + .offset = offset, + .positive = false, + }).toU32()); + }, + 2 => { + const offset = if (adj_off <= math.maxInt(u8)) blk: { + break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + + writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrh(.al, reg, .fp, .{ + .offset = offset, + .positive = false, + }).toU32()); + }, else => return self.fail(src, "TODO a type of size {} is not allowed in a register", .{abi_size}), } }, diff --git a/src/codegen/arm.zig b/src/codegen/arm.zig index 978c653cb0..94f1ae951d 100644 --- a/src/codegen/arm.zig +++ b/src/codegen/arm.zig @@ -240,6 +240,22 @@ pub const Instruction = union(enum) { fixed: u2 = 0b01, cond: u4, }, + ExtraLoadStore: packed struct { + imm4l: u4, + fixed_1: u1 = 0b1, + op2: u2, + fixed_2: u1 = 0b1, + imm4h: u4, + rt: u4, + rn: u4, + o1: u1, + write_back: u1, + imm: u1, + up_down: u1, + pre_index: u1, + fixed_3: u3 = 0b000, + cond: u4, + }, BlockDataTransfer: packed struct { register_list: u16, rn: u4, @@ -468,6 +484,29 @@ pub const Instruction = union(enum) { } }; + /// Represents the offset operand of an extra load or store + /// instruction. 
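+    /// Unlike the 12-bit immediate of an ordinary single data transfer,
+    /// the immediate form of this offset is only 8 bits wide; the encoder
+    /// splits it into the imm4h and imm4l instruction fields.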
+ pub const ExtraLoadStoreOffset = union(enum) { + immediate: u8, + register: u4, + + pub const none = ExtraLoadStoreOffset{ + .immediate = 0, + }; + + pub fn reg(register: Register) ExtraLoadStoreOffset { + return ExtraLoadStoreOffset{ + .register = register.id(), + }; + } + + pub fn imm(immediate: u8) ExtraLoadStoreOffset { + return ExtraLoadStoreOffset{ + .immediate = immediate, + }; + } + }; + /// Represents the register list operand to a block data transfer /// instruction pub const RegisterList = packed struct { @@ -495,6 +534,7 @@ pub const Instruction = union(enum) { .Multiply => |v| @bitCast(u32, v), .MultiplyLong => |v| @bitCast(u32, v), .SingleDataTransfer => |v| @bitCast(u32, v), + .ExtraLoadStore => |v| @bitCast(u32, v), .BlockDataTransfer => |v| @bitCast(u32, v), .Branch => |v| @bitCast(u32, v), .BranchExchange => |v| @bitCast(u32, v), @@ -617,6 +657,43 @@ pub const Instruction = union(enum) { }; } + fn extraLoadStore( + cond: Condition, + pre_index: bool, + positive: bool, + write_back: bool, + o1: u1, + op2: u2, + rn: Register, + rt: Register, + offset: ExtraLoadStoreOffset, + ) Instruction { + const imm4l: u4 = switch (offset) { + .immediate => |imm| @truncate(u4, imm), + .register => |reg| reg, + }; + const imm4h: u4 = switch (offset) { + .immediate => |imm| @truncate(u4, imm >> 4), + .register => |reg| 0b0000, + }; + + return Instruction{ + .ExtraLoadStore = .{ + .imm4l = imm4l, + .op2 = op2, + .imm4h = imm4h, + .rt = rt.id(), + .rn = rn.id(), + .o1 = o1, + .write_back = @boolToInt(write_back), + .imm = @boolToInt(offset == .immediate), + .up_down = @boolToInt(positive), + .pre_index = @boolToInt(pre_index), + .cond = @enumToInt(cond), + }, + }; + } + fn blockDataTransfer( cond: Condition, rn: Register, @@ -913,6 +990,23 @@ pub const Instruction = union(enum) { return singleDataTransfer(cond, rd, rn, args.offset, args.pre_index, args.positive, 1, args.write_back, 0); } + // Extra load/store + + pub const ExtraLoadStoreOffsetArgs = struct { + pre_index: bool = true, + positive: bool = true, + offset: ExtraLoadStoreOffset, + write_back: bool = false, + }; + + pub fn strh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction { + return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0, 0b01, rn, rt, args.offset); + } + + pub fn ldrh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction { + return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 1, 0b01, rn, rt, args.offset); + } + // Block data transfer pub fn ldmda(cond: Condition, rn: Register, write_back: bool, reg_list: RegisterList) Instruction { @@ -1093,6 +1187,12 @@ test "serialize instructions" { }), .expected = 0b1110_01_0_1_1_0_0_0_0011_0000_000000000000, }, + .{ // strh r1, [r5] + .inst = Instruction.strh(.al, .r1, .r5, .{ + .offset = Instruction.ExtraLoadStoreOffset.none, + }), + .expected = 0b1110_000_1_1_1_0_0_0101_0001_0000_1011_0000, + }, .{ // b #12 .inst = Instruction.b(.al, 12), .expected = 0b1110_101_0_0000_0000_0000_0000_0000_0011, -- cgit v1.2.3 From 8c9ac4db978c80246b4872c899b1618b1b195ec2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 16 Jan 2021 22:51:01 -0700 Subject: stage2: implement error notes and regress -femit-zir * Implement error notes - note: other symbol exported here - note: previous else prong is here - note: previous '_' prong is here * Add Compilation.CObject.ErrorMsg. This object properly converts to AllErrors.Message when the time comes. 
* Add Compilation.CObject.failure_retryable. Properly handles
  out-of-memory and other transient failures.

* Introduce Module.SrcLoc which has not only a byte offset but also
  references the file to which the byte offset applies.

* Scope.Block now contains both a pointer to the "owner" Decl and the
  "source" Decl. As an example, during an inline function call, the
  "owner" will be the Decl of the caller and the "source" will be the
  Decl of the callee.

* Module.ErrorMsg now sports a `file_scope` field so that notes can
  refer to source locations in a file other than the parent error
  message.

* Some instances that stored a `*Scope` now store a `*Scope.Container`.

* Some methods in the `Scope` namespace were moved to the more specific
  type, since there was only an implementation for one particular tag.
  - `removeDecl` moved to `Scope.Container`
  - `destroy` moved to `Scope.File`

* Two kinds of Scope deleted:
  - zir_module
  - decl

* astgen: properly use DeclVal / DeclRef. DeclVal was incorrectly
  changed to be a reference; this commit fixes it. Fewer ZIR
  instructions processed as a result.
  - declval_in_module is renamed to declval
  - previous declval ZIR instruction is deleted; it was only for .zir
    files.

* Test harness: friendlier diagnostics when an unexpected set of
  errors is encountered.

* zir_sema: fix analyzeInstBlockFlat by properly calling resolvingInst
  on the last zir instruction in the block.

Compile log implementation:
* Write to a buffer rather than directly to stderr.
* Only keep track of 1 callsite per Decl.
* No longer mutate the ZIR Inst struct data.
* "Compile log statement found" errors are only emitted when there are
  no other compile errors.

-femit-zir and support for .zir source files are regressed. If we
wanted to support this again, outputting .zir would need to be done as
yet another backend rather than in the haphazard way it was previously
implemented.

Parsing of .zir was previously implemented in a way that was not
helpful for debugging. We need tighter integration with the test
harness for it to be useful, so clearly a rewrite is needed. Given
that, and given that the old implementation was getting in the way of
progress and organization of the rest of stage2, I regressed the
feature.
---
 src/Compilation.zig             |  305 +++++---
 src/Module.zig                  |  764 +++++++------
 src/astgen.zig                  |   77 +-
 src/codegen.zig                 |  166 ++--
 src/codegen/c.zig               |    7 +-
 src/codegen/llvm.zig            |   13 +-
 src/link/Coff.zig               |    6 +-
 src/link/Elf.zig                |   33 +-
 src/link/MachO.zig              |    8 +-
 src/link/MachO/DebugSymbols.zig |   27 +-
 src/main.zig                    |   47 +-
 src/test.zig                    |  212 +++---
 src/type/Enum.zig               |    2 +-
 src/type/Struct.zig             |    2 +-
 src/type/Union.zig              |    2 +-
 src/zir.zig                     | 1610 +--------------------------------------
 src/zir_sema.zig                |  179 ++---
 test/stage2/test.zig            |   62 +-
 18 files changed, 904 insertions(+), 2618 deletions(-)

(limited to 'src/codegen.zig')

diff --git a/src/Compilation.zig b/src/Compilation.zig
index 225a91e5d2..ad99e40541 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -51,7 +51,7 @@ c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
 
 /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
 /// This data is accessed by multiple threads and is protected by `mutex`.
-failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *ErrorMsg) = .{}, +failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.ErrorMsg) = .{}, keep_source_files_loaded: bool, use_clang: bool, @@ -215,13 +215,29 @@ pub const CObject = struct { }, /// There will be a corresponding ErrorMsg in Compilation.failed_c_objects. failure, + /// A transient failure happened when trying to compile the C Object; it may + /// succeed if we try again. There may be a corresponding ErrorMsg in + /// Compilation.failed_c_objects. If there is not, the failure is out of memory. + failure_retryable, }, + pub const ErrorMsg = struct { + msg: []const u8, + line: u32, + column: u32, + + pub fn destroy(em: *ErrorMsg, gpa: *Allocator) void { + gpa.free(em.msg); + gpa.destroy(em); + em.* = undefined; + } + }; + /// Returns if there was failure. pub fn clearStatus(self: *CObject, gpa: *Allocator) bool { switch (self.status) { .new => return false, - .failure => { + .failure, .failure_retryable => { self.status = .new; return true; }, @@ -240,6 +256,11 @@ pub const CObject = struct { } }; +/// To support incremental compilation, errors are stored in various places +/// so that they can be created and destroyed appropriately. This structure +/// is used to collect all the errors from the various places into one +/// convenient place for API users to consume. It is allocated into 1 heap +/// and freed all at once. pub const AllErrors = struct { arena: std.heap.ArenaAllocator.State, list: []const Message, @@ -251,23 +272,32 @@ pub const AllErrors = struct { column: usize, byte_offset: usize, msg: []const u8, + notes: []Message = &.{}, }, plain: struct { msg: []const u8, }, - pub fn renderToStdErr(self: Message) void { - switch (self) { + pub fn renderToStdErr(msg: Message) void { + return msg.renderToStdErrInner("error"); + } + + fn renderToStdErrInner(msg: Message, kind: []const u8) void { + switch (msg) { .src => |src| { - std.debug.print("{s}:{d}:{d}: error: {s}\n", .{ + std.debug.print("{s}:{d}:{d}: {s}: {s}\n", .{ src.src_path, src.line + 1, src.column + 1, + kind, src.msg, }); + for (src.notes) |note| { + note.renderToStdErrInner("note"); + } }, .plain => |plain| { - std.debug.print("error: {s}\n", .{plain.msg}); + std.debug.print("{s}: {s}\n", .{ kind, plain.msg }); }, } } @@ -278,20 +308,38 @@ pub const AllErrors = struct { } fn add( + module: *Module, arena: *std.heap.ArenaAllocator, errors: *std.ArrayList(Message), - sub_file_path: []const u8, - source: []const u8, - simple_err_msg: ErrorMsg, + module_err_msg: Module.ErrorMsg, ) !void { - const loc = std.zig.findLineColumn(source, simple_err_msg.byte_offset); + const notes = try arena.allocator.alloc(Message, module_err_msg.notes.len); + for (notes) |*note, i| { + const module_note = module_err_msg.notes[i]; + const source = try module_note.src_loc.file_scope.getSource(module); + const loc = std.zig.findLineColumn(source, module_note.src_loc.byte_offset); + const sub_file_path = module_note.src_loc.file_scope.sub_file_path; + note.* = .{ + .src = .{ + .src_path = try arena.allocator.dupe(u8, sub_file_path), + .msg = try arena.allocator.dupe(u8, module_note.msg), + .byte_offset = module_note.src_loc.byte_offset, + .line = loc.line, + .column = loc.column, + }, + }; + } + const source = try module_err_msg.src_loc.file_scope.getSource(module); + const loc = std.zig.findLineColumn(source, module_err_msg.src_loc.byte_offset); + const sub_file_path = module_err_msg.src_loc.file_scope.sub_file_path; try errors.append(.{ .src = .{ .src_path = try 
arena.allocator.dupe(u8, sub_file_path), - .msg = try arena.allocator.dupe(u8, simple_err_msg.msg), - .byte_offset = simple_err_msg.byte_offset, + .msg = try arena.allocator.dupe(u8, module_err_msg.msg), + .byte_offset = module_err_msg.src_loc.byte_offset, .line = loc.line, .column = loc.column, + .notes = notes, }, }); } @@ -849,17 +897,9 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { .ty = struct_ty, }, }; - break :rs &root_scope.base; + break :rs root_scope; } else if (mem.endsWith(u8, root_pkg.root_src_path, ".zir")) { - const root_scope = try gpa.create(Module.Scope.ZIRModule); - root_scope.* = .{ - .sub_file_path = root_pkg.root_src_path, - .source = .{ .unloaded = {} }, - .contents = .{ .not_available = {} }, - .status = .never_loaded, - .decls = .{}, - }; - break :rs &root_scope.base; + return error.ZirFilesUnsupported; } else { unreachable; } @@ -1258,32 +1298,23 @@ pub fn update(self: *Compilation) !void { const use_stage1 = build_options.is_stage1 and self.bin_file.options.use_llvm; if (!use_stage1) { if (self.bin_file.options.module) |module| { + module.compile_log_text.shrinkAndFree(module.gpa, 0); module.generation += 1; // TODO Detect which source files changed. // Until then we simulate a full cache miss. Source files could have been loaded for any reason; // to force a refresh we unload now. - if (module.root_scope.cast(Module.Scope.File)) |zig_file| { - zig_file.unload(module.gpa); - module.failed_root_src_file = null; - module.analyzeContainer(&zig_file.root_container) catch |err| switch (err) { - error.AnalysisFail => { - assert(self.totalErrorCount() != 0); - }, - error.OutOfMemory => return error.OutOfMemory, - else => |e| { - module.failed_root_src_file = e; - }, - }; - } else if (module.root_scope.cast(Module.Scope.ZIRModule)) |zir_module| { - zir_module.unload(module.gpa); - module.analyzeRootZIRModule(zir_module) catch |err| switch (err) { - error.AnalysisFail => { - assert(self.totalErrorCount() != 0); - }, - else => |e| return e, - }; - } + module.root_scope.unload(module.gpa); + module.failed_root_src_file = null; + module.analyzeContainer(&module.root_scope.root_container) catch |err| switch (err) { + error.AnalysisFail => { + assert(self.totalErrorCount() != 0); + }, + error.OutOfMemory => return error.OutOfMemory, + else => |e| { + module.failed_root_src_file = e; + }, + }; // TODO only analyze imports if they are still referenced for (module.import_table.items()) |entry| { @@ -1359,14 +1390,18 @@ pub fn totalErrorCount(self: *Compilation) usize { module.failed_exports.items().len + module.failed_files.items().len + @boolToInt(module.failed_root_src_file != null); - for (module.compile_log_decls.items()) |entry| { - total += entry.value.items.len; - } } // The "no entry point found" error only counts if there are no other errors. if (total == 0) { - return @boolToInt(self.link_error_flags.no_entry_point_found); + total += @boolToInt(self.link_error_flags.no_entry_point_found); + } + + // Compile log errors only count if there are no other errors. 
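+    // getAllErrorsAlloc applies the same rule, and asserts that the number
+    // of errors it collects matches this count.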
+ if (total == 0) { + if (self.bin_file.options.module) |module| { + total += @boolToInt(module.compile_log_decls.items().len != 0); + } } return total; @@ -1382,32 +1417,32 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { for (self.failed_c_objects.items()) |entry| { const c_object = entry.key; const err_msg = entry.value; - try AllErrors.add(&arena, &errors, c_object.src.src_path, "", err_msg.*); + // TODO these fields will need to be adjusted when we have proper + // C error reporting bubbling up. + try errors.append(.{ + .src = .{ + .src_path = try arena.allocator.dupe(u8, c_object.src.src_path), + .msg = try std.fmt.allocPrint(&arena.allocator, "unable to build C object: {s}", .{ + err_msg.msg, + }), + .byte_offset = 0, + .line = err_msg.line, + .column = err_msg.column, + }, + }); } if (self.bin_file.options.module) |module| { for (module.failed_files.items()) |entry| { - const scope = entry.key; - const err_msg = entry.value; - const source = try scope.getSource(module); - try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*); + try AllErrors.add(module, &arena, &errors, entry.value.*); } for (module.failed_decls.items()) |entry| { - const decl = entry.key; - const err_msg = entry.value; - const source = try decl.scope.getSource(module); - try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); + try AllErrors.add(module, &arena, &errors, entry.value.*); } for (module.emit_h_failed_decls.items()) |entry| { - const decl = entry.key; - const err_msg = entry.value; - const source = try decl.scope.getSource(module); - try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); + try AllErrors.add(module, &arena, &errors, entry.value.*); } for (module.failed_exports.items()) |entry| { - const decl = entry.key.owner_decl; - const err_msg = entry.value; - const source = try decl.scope.getSource(module); - try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); + try AllErrors.add(module, &arena, &errors, entry.value.*); } if (module.failed_root_src_file) |err| { const file_path = try module.root_pkg.root_src_directory.join(&arena.allocator, &[_][]const u8{ @@ -1418,15 +1453,6 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { }); try AllErrors.addPlain(&arena, &errors, msg); } - for (module.compile_log_decls.items()) |entry| { - const decl = entry.key; - const path = decl.scope.subFilePath(); - const source = try decl.scope.getSource(module); - for (entry.value.items) |src_loc| { - const err_msg = ErrorMsg{ .byte_offset = src_loc, .msg = "found compile log statement" }; - try AllErrors.add(&arena, &errors, path, source, err_msg); - } - } } if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) { @@ -1437,6 +1463,28 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { }); } + if (self.bin_file.options.module) |module| { + const compile_log_items = module.compile_log_decls.items(); + if (errors.items.len == 0 and compile_log_items.len != 0) { + // First one will be the error; subsequent ones will be notes. 
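+            // Each note reuses the "also here" message and points at one
+            // additional @compileLog callsite.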
+ const err_msg = Module.ErrorMsg{ + .src_loc = compile_log_items[0].value, + .msg = "found compile log statement", + .notes = try self.gpa.alloc(Module.ErrorMsg, compile_log_items.len - 1), + }; + defer self.gpa.free(err_msg.notes); + + for (compile_log_items[1..]) |entry, i| { + err_msg.notes[i] = .{ + .src_loc = entry.value, + .msg = "also here", + }; + } + + try AllErrors.add(module, &arena, &errors, err_msg); + } + } + assert(errors.items.len == self.totalErrorCount()); return AllErrors{ @@ -1445,6 +1493,11 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { }; } +pub fn getCompileLogOutput(self: *Compilation) []const u8 { + const module = self.bin_file.options.module orelse return &[0]u8{}; + return module.compile_log_text.items; +} + pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void { var progress: std.Progress = .{}; var main_progress_node = try progress.start("", 0); @@ -1517,9 +1570,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor }, else => { try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( module.gpa, - decl.src(), + decl.srcLoc(), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -1586,9 +1639,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( module.gpa, - decl.src(), + decl.srcLoc(), "unable to update line number: {s}", .{@errorName(err)}, )); @@ -1858,26 +1911,38 @@ fn workerUpdateCObject( comp.updateCObject(c_object, progress_node) catch |err| switch (err) { error.AnalysisFail => return, else => { - { - const lock = comp.mutex.acquire(); - defer lock.release(); - comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1) catch { - fatal("TODO handle this by setting c_object.status = oom failure", .{}); - }; - comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, ErrorMsg.create( - comp.gpa, - 0, - "unable to build C object: {s}", - .{@errorName(err)}, - ) catch { - fatal("TODO handle this by setting c_object.status = oom failure", .{}); - }); - } - c_object.status = .{ .failure = {} }; + comp.reportRetryableCObjectError(c_object, err) catch |oom| switch (oom) { + // Swallowing this error is OK because it's implied to be OOM when + // there is a missing failed_c_objects error message. 
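+                // reportRetryableCObjectError sets the status before doing
+                // any allocation, and CObject documents a missing ErrorMsg
+                // entry as meaning the failure was out of memory.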
+ error.OutOfMemory => {}, + }; }, }; } +fn reportRetryableCObjectError( + comp: *Compilation, + c_object: *CObject, + err: anyerror, +) error{OutOfMemory}!void { + c_object.status = .failure_retryable; + + const c_obj_err_msg = try comp.gpa.create(CObject.ErrorMsg); + errdefer comp.gpa.destroy(c_obj_err_msg); + const msg = try std.fmt.allocPrint(comp.gpa, "unable to build C object: {s}", .{@errorName(err)}); + errdefer comp.gpa.free(msg); + c_obj_err_msg.* = .{ + .msg = msg, + .line = 0, + .column = 0, + }; + { + const lock = comp.mutex.acquire(); + defer lock.release(); + try comp.failed_c_objects.putNoClobber(comp.gpa, c_object, c_obj_err_msg); + } +} + fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: *std.Progress.Node) !void { if (!build_options.have_llvm) { return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{}); @@ -1892,7 +1957,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: * // There was previous failure. const lock = comp.mutex.acquire(); defer lock.release(); - comp.failed_c_objects.removeAssertDiscard(c_object); + // If the failure was OOM, there will not be an entry here, so we do + // not assert discard. + _ = comp.failed_c_objects.swapRemove(c_object); } var man = comp.obtainCObjectCacheManifest(); @@ -2343,11 +2410,27 @@ pub fn addCCArgs( fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError { @setCold(true); - const err_msg = try ErrorMsg.create(comp.gpa, 0, "unable to build C object: " ++ format, args); + const err_msg = blk: { + const msg = try std.fmt.allocPrint(comp.gpa, format, args); + errdefer comp.gpa.free(msg); + const err_msg = try comp.gpa.create(CObject.ErrorMsg); + errdefer comp.gpa.destroy(err_msg); + err_msg.* = .{ + .msg = msg, + .line = 0, + .column = 0, + }; + break :blk err_msg; + }; return comp.failCObjWithOwnedErrorMsg(c_object, err_msg); } -fn failCObjWithOwnedErrorMsg(comp: *Compilation, c_object: *CObject, err_msg: *ErrorMsg) InnerError { +fn failCObjWithOwnedErrorMsg( + comp: *Compilation, + c_object: *CObject, + err_msg: *CObject.ErrorMsg, +) InnerError { + @setCold(true); { const lock = comp.mutex.acquire(); defer lock.release(); @@ -2361,36 +2444,6 @@ fn failCObjWithOwnedErrorMsg(comp: *Compilation, c_object: *CObject, err_msg: *E return error.AnalysisFail; } -pub const ErrorMsg = struct { - byte_offset: usize, - msg: []const u8, - - pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg { - const self = try gpa.create(ErrorMsg); - errdefer gpa.destroy(self); - self.* = try init(gpa, byte_offset, format, args); - return self; - } - - /// Assumes the ErrorMsg struct and msg were both allocated with allocator. - pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void { - self.deinit(gpa); - gpa.destroy(self); - } - - pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg { - return ErrorMsg{ - .byte_offset = byte_offset, - .msg = try std.fmt.allocPrint(gpa, format, args), - }; - } - - pub fn deinit(self: *ErrorMsg, gpa: *Allocator) void { - gpa.free(self.msg); - self.* = undefined; - } -}; - pub const FileExt = enum { c, cpp, diff --git a/src/Module.zig b/src/Module.zig index 0bdeab68d0..e612f8f759 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -35,8 +35,7 @@ zig_cache_artifact_directory: Compilation.Directory, /// Pointer to externally managed resource. 
`null` if there is no zig file being compiled. root_pkg: *Package, /// Module owns this resource. -/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`. -root_scope: *Scope, +root_scope: *Scope.File, /// It's rare for a decl to be exported, so we save memory by having a sparse map of /// Decl pointers to details about them being exported. /// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table. @@ -57,19 +56,19 @@ decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_has /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. /// Note that a Decl can succeed but the Fn it represents can fail. In this case, /// a Decl can have a failed_decls entry but have analysis status of success. -failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *Compilation.ErrorMsg) = .{}, +failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, /// When emit_h is non-null, each Decl gets one more compile error slot for /// emit-h failing for that Decl. This table is also how we tell if a Decl has /// failed emit-h or succeeded. -emit_h_failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *Compilation.ErrorMsg) = .{}, -/// A Decl can have multiple compileLogs, but only one error, so we map a Decl to a the src locs of all the compileLogs -compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, ArrayListUnmanaged(usize)) = .{}, +emit_h_failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, +/// Keep track of one `@compileLog` callsite per owner Decl. +compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, SrcLoc) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator. -failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *Compilation.ErrorMsg) = .{}, +failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator. -failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *Compilation.ErrorMsg) = .{}, +failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{}, next_anon_name_index: usize = 0, @@ -103,6 +102,8 @@ stage1_flags: packed struct { emit_h: ?Compilation.EmitLoc, +compile_log_text: std.ArrayListUnmanaged(u8) = .{}, + pub const Export = struct { options: std.builtin.ExportOptions, /// Byte offset into the file that contains the export directive. @@ -138,9 +139,9 @@ pub const Decl = struct { /// mapping them to an address in the output file. /// Memory owned by this decl, using Module's allocator. name: [*:0]const u8, - /// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`. + /// The direct parent container of the Decl. /// Reference to externally owned memory. - scope: *Scope, + container: *Scope.Container, /// The AST Node decl index or ZIR Inst index that contains this declaration. /// Must be recomputed when the corresponding source file is modified. 
src_index: usize, @@ -235,31 +236,21 @@ pub const Decl = struct { } } + pub fn srcLoc(self: Decl) SrcLoc { + return .{ + .byte_offset = self.src(), + .file_scope = self.getFileScope(), + }; + } + pub fn src(self: Decl) usize { - switch (self.scope.tag) { - .container => { - const container = @fieldParentPtr(Scope.Container, "base", self.scope); - const tree = container.file_scope.contents.tree; - // TODO Container should have its own decls() - const decl_node = tree.root_node.decls()[self.src_index]; - return tree.token_locs[decl_node.firstToken()].start; - }, - .zir_module => { - const zir_module = @fieldParentPtr(Scope.ZIRModule, "base", self.scope); - const module = zir_module.contents.module; - const src_decl = module.decls[self.src_index]; - return src_decl.inst.src; - }, - .file, .block => unreachable, - .gen_zir => unreachable, - .local_val => unreachable, - .local_ptr => unreachable, - .decl => unreachable, - } + const tree = self.container.file_scope.contents.tree; + const decl_node = tree.root_node.decls()[self.src_index]; + return tree.token_locs[decl_node.firstToken()].start; } pub fn fullyQualifiedNameHash(self: Decl) Scope.NameHash { - return self.scope.fullyQualifiedNameHash(mem.spanZ(self.name)); + return self.container.fullyQualifiedNameHash(mem.spanZ(self.name)); } pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue { @@ -293,9 +284,8 @@ pub const Decl = struct { } } - /// Asserts that the `Decl` is part of AST and not ZIRModule. - pub fn getFileScope(self: *Decl) *Scope.File { - return self.scope.cast(Scope.Container).?.file_scope; + pub fn getFileScope(self: Decl) *Scope.File { + return self.container.file_scope; } pub fn getEmitH(decl: *Decl, module: *Module) *EmitH { @@ -326,7 +316,7 @@ pub const Fn = struct { /// Contains un-analyzed ZIR instructions generated from Zig source AST. /// Even after we finish analysis, the ZIR is kept in memory, so that /// comptime and inline function calls can happen. - zir: zir.Module.Body, + zir: zir.Body, /// undefined unless analysis state is `success`. body: Body, state: Analysis, @@ -373,47 +363,49 @@ pub const Scope = struct { return @fieldParentPtr(T, "base", base); } - /// Asserts the scope has a parent which is a DeclAnalysis and - /// returns the arena Allocator. + /// Returns the arena Allocator associated with the Decl of the Scope. pub fn arena(self: *Scope) *Allocator { switch (self.tag) { .block => return self.cast(Block).?.arena, - .decl => return &self.cast(DeclAnalysis).?.arena.allocator, .gen_zir => return self.cast(GenZIR).?.arena, .local_val => return self.cast(LocalVal).?.gen_zir.arena, .local_ptr => return self.cast(LocalPtr).?.gen_zir.arena, - .zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator, .file => unreachable, .container => unreachable, } } - /// If the scope has a parent which is a `DeclAnalysis`, - /// returns the `Decl`, otherwise returns `null`. 
- pub fn decl(self: *Scope) ?*Decl { + pub fn ownerDecl(self: *Scope) ?*Decl { + return switch (self.tag) { + .block => self.cast(Block).?.owner_decl, + .gen_zir => self.cast(GenZIR).?.decl, + .local_val => self.cast(LocalVal).?.gen_zir.decl, + .local_ptr => self.cast(LocalPtr).?.gen_zir.decl, + .file => null, + .container => null, + }; + } + + pub fn srcDecl(self: *Scope) ?*Decl { return switch (self.tag) { - .block => self.cast(Block).?.decl, + .block => self.cast(Block).?.src_decl, .gen_zir => self.cast(GenZIR).?.decl, .local_val => self.cast(LocalVal).?.gen_zir.decl, .local_ptr => self.cast(LocalPtr).?.gen_zir.decl, - .decl => self.cast(DeclAnalysis).?.decl, - .zir_module => null, .file => null, .container => null, }; } - /// Asserts the scope has a parent which is a ZIRModule or Container and - /// returns it. - pub fn namespace(self: *Scope) *Scope { + /// Asserts the scope has a parent which is a Container and returns it. + pub fn namespace(self: *Scope) *Container { switch (self.tag) { - .block => return self.cast(Block).?.decl.scope, - .gen_zir => return self.cast(GenZIR).?.decl.scope, - .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope, - .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope, - .decl => return self.cast(DeclAnalysis).?.decl.scope, - .file => return &self.cast(File).?.root_container.base, - .zir_module, .container => return self, + .block => return self.cast(Block).?.owner_decl.container, + .gen_zir => return self.cast(GenZIR).?.decl.container, + .local_val => return self.cast(LocalVal).?.gen_zir.decl.container, + .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.container, + .file => return &self.cast(File).?.root_container, + .container => return self.cast(Container).?, } } @@ -426,9 +418,7 @@ pub const Scope = struct { .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, - .decl => unreachable, .file => unreachable, - .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name), .container => return self.cast(Container).?.fullyQualifiedNameHash(name), } } @@ -437,12 +427,10 @@ pub const Scope = struct { pub fn tree(self: *Scope) *ast.Tree { switch (self.tag) { .file => return self.cast(File).?.contents.tree, - .zir_module => unreachable, - .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree, - .block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree, - .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree, - .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree, - .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree, + .block => return self.cast(Block).?.src_decl.container.file_scope.contents.tree, + .gen_zir => return self.cast(GenZIR).?.decl.container.file_scope.contents.tree, + .local_val => return self.cast(LocalVal).?.gen_zir.decl.container.file_scope.contents.tree, + .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.container.file_scope.contents.tree, .container => return self.cast(Container).?.file_scope.contents.tree, } } @@ -454,38 +442,21 @@ pub const Scope = struct { .gen_zir => self.cast(GenZIR).?, .local_val => return self.cast(LocalVal).?.gen_zir, .local_ptr => return self.cast(LocalPtr).?.gen_zir, - .decl => unreachable, - .zir_module => unreachable, .file => unreachable, .container => unreachable, }; } - /// Asserts the scope has a parent which is a ZIRModule, 
Container or File and + /// Asserts the scope has a parent which is a Container or File and /// returns the sub_file_path field. pub fn subFilePath(base: *Scope) []const u8 { switch (base.tag) { .container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path, .file => return @fieldParentPtr(File, "base", base).sub_file_path, - .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path, .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, - .decl => unreachable, - } - } - - pub fn unload(base: *Scope, gpa: *Allocator) void { - switch (base.tag) { - .file => return @fieldParentPtr(File, "base", base).unload(gpa), - .zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(gpa), - .block => unreachable, - .gen_zir => unreachable, - .local_val => unreachable, - .local_ptr => unreachable, - .decl => unreachable, - .container => unreachable, } } @@ -493,67 +464,28 @@ pub const Scope = struct { switch (base.tag) { .container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module), .file => return @fieldParentPtr(File, "base", base).getSource(module), - .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module), .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, .block => unreachable, - .decl => unreachable, } } + /// When called from inside a Block Scope, chases the src_decl, not the owner_decl. pub fn getFileScope(base: *Scope) *Scope.File { var cur = base; while (true) { cur = switch (cur.tag) { .container => return @fieldParentPtr(Container, "base", cur).file_scope, .file => return @fieldParentPtr(File, "base", cur), - .zir_module => unreachable, // TODO are zir modules allowed to import packages? .gen_zir => @fieldParentPtr(GenZIR, "base", cur).parent, .local_val => @fieldParentPtr(LocalVal, "base", cur).parent, .local_ptr => @fieldParentPtr(LocalPtr, "base", cur).parent, - .block => @fieldParentPtr(Block, "base", cur).decl.scope, - .decl => @fieldParentPtr(DeclAnalysis, "base", cur).decl.scope, + .block => return @fieldParentPtr(Block, "base", cur).src_decl.container.file_scope, }; } } - /// Asserts the scope is a namespace Scope and removes the Decl from the namespace. - pub fn removeDecl(base: *Scope, child: *Decl) void { - switch (base.tag) { - .container => return @fieldParentPtr(Container, "base", base).removeDecl(child), - .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child), - .file => unreachable, - .block => unreachable, - .gen_zir => unreachable, - .local_val => unreachable, - .local_ptr => unreachable, - .decl => unreachable, - } - } - - /// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it. - pub fn destroy(base: *Scope, gpa: *Allocator) void { - switch (base.tag) { - .file => { - const scope_file = @fieldParentPtr(File, "base", base); - scope_file.deinit(gpa); - gpa.destroy(scope_file); - }, - .zir_module => { - const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base); - scope_zir_module.deinit(gpa); - gpa.destroy(scope_zir_module); - }, - .block => unreachable, - .gen_zir => unreachable, - .local_val => unreachable, - .local_ptr => unreachable, - .decl => unreachable, - .container => unreachable, - } - } - fn name_hash_hash(x: NameHash) u32 { return @truncate(u32, @bitCast(u128, x)); } @@ -563,14 +495,11 @@ pub const Scope = struct { } pub const Tag = enum { - /// .zir source code. - zir_module, /// .zig source code. 
file, /// struct, enum or union, every .file contains one of these. container, block, - decl, gen_zir, local_val, local_ptr, @@ -657,6 +586,11 @@ pub const Scope = struct { self.* = undefined; } + pub fn destroy(self: *File, gpa: *Allocator) void { + self.deinit(gpa); + gpa.destroy(self); + } + pub fn dumpSrc(self: *File, src: usize) void { const loc = std.zig.findLineColumn(self.source.bytes, src); std.debug.print("{s}:{d}:{d}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 }); @@ -681,109 +615,6 @@ pub const Scope = struct { } }; - pub const ZIRModule = struct { - pub const base_tag: Tag = .zir_module; - base: Scope = Scope{ .tag = base_tag }, - /// Relative to the owning package's root_src_dir. - /// Reference to external memory, not owned by ZIRModule. - sub_file_path: []const u8, - source: union(enum) { - unloaded: void, - bytes: [:0]const u8, - }, - contents: union { - not_available: void, - module: *zir.Module, - }, - status: enum { - never_loaded, - unloaded_success, - unloaded_parse_failure, - unloaded_sema_failure, - - loaded_sema_failure, - loaded_success, - }, - - /// Even though .zir files only have 1 module, this set is still needed - /// because of anonymous Decls, which can exist in the global set, but - /// not this one. - decls: ArrayListUnmanaged(*Decl), - - pub fn unload(self: *ZIRModule, gpa: *Allocator) void { - switch (self.status) { - .never_loaded, - .unloaded_parse_failure, - .unloaded_sema_failure, - .unloaded_success, - => {}, - - .loaded_success => { - self.contents.module.deinit(gpa); - gpa.destroy(self.contents.module); - self.contents = .{ .not_available = {} }; - self.status = .unloaded_success; - }, - .loaded_sema_failure => { - self.contents.module.deinit(gpa); - gpa.destroy(self.contents.module); - self.contents = .{ .not_available = {} }; - self.status = .unloaded_sema_failure; - }, - } - switch (self.source) { - .bytes => |bytes| { - gpa.free(bytes); - self.source = .{ .unloaded = {} }; - }, - .unloaded => {}, - } - } - - pub fn deinit(self: *ZIRModule, gpa: *Allocator) void { - self.decls.deinit(gpa); - self.unload(gpa); - self.* = undefined; - } - - pub fn removeDecl(self: *ZIRModule, child: *Decl) void { - for (self.decls.items) |item, i| { - if (item == child) { - _ = self.decls.swapRemove(i); - return; - } - } - } - - pub fn dumpSrc(self: *ZIRModule, src: usize) void { - const loc = std.zig.findLineColumn(self.source.bytes, src); - std.debug.print("{s}:{d}:{d}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 }); - } - - pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 { - switch (self.source) { - .unloaded => { - const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions( - module.gpa, - self.sub_file_path, - std.math.maxInt(u32), - null, - 1, - 0, - ); - self.source = .{ .bytes = source }; - return source; - }, - .bytes => |bytes| return bytes, - } - } - - pub fn fullyQualifiedNameHash(self: *ZIRModule, name: []const u8) NameHash { - // ZIR modules only have 1 file with all decls global in the same namespace. - return std.zig.hashSrc(name); - } - }; - /// This is a temporary structure, references to it are valid only /// during semantic analysis of the block. pub const Block = struct { @@ -794,9 +625,14 @@ pub const Scope = struct { /// Maps ZIR to TZIR. Shared to sub-blocks. inst_table: *InstTable, func: ?*Fn, - decl: *Decl, + /// When analyzing an inline function call, owner_decl is the Decl of the caller + /// and src_decl is the Decl of the callee. 
+ /// This Decl owns the arena memory of this Block. + owner_decl: *Decl, + /// This Decl is the Decl according to the Zig source code corresponding to this Block. + src_decl: *Decl, instructions: ArrayListUnmanaged(*Inst), - /// Points to the arena allocator of DeclAnalysis + /// Points to the arena allocator of the Decl. arena: *Allocator, label: ?Label = null, inlining: ?*Inlining, @@ -845,21 +681,12 @@ pub const Scope = struct { } }; - /// This is a temporary structure, references to it are valid only - /// during semantic analysis of the decl. - pub const DeclAnalysis = struct { - pub const base_tag: Tag = .decl; - base: Scope = Scope{ .tag = base_tag }, - decl: *Decl, - arena: std.heap.ArenaAllocator, - }; - /// This is a temporary structure, references to it are valid only /// during semantic analysis of the decl. pub const GenZIR = struct { pub const base_tag: Tag = .gen_zir; base: Scope = Scope{ .tag = base_tag }, - /// Parents can be: `GenZIR`, `ZIRModule`, `File` + /// Parents can be: `GenZIR`, `File` parent: *Scope, decl: *Decl, arena: *Allocator, @@ -905,11 +732,73 @@ pub const Scope = struct { }; }; +/// This struct holds data necessary to construct API-facing `AllErrors.Message`. +/// Its memory is managed with the general purpose allocator so that they +/// can be created and destroyed in response to incremental updates. +/// In some cases, the Scope.File could have been inferred from where the ErrorMsg +/// is stored. For example, if it is stored in Module.failed_decls, then the Scope.File +/// would be determined by the Decl Scope. However, the data structure contains the field +/// anyway so that `ErrorMsg` can be reused for error notes, which may be in a different +/// file than the parent error message. It also simplifies processing of error messages. +pub const ErrorMsg = struct { + src_loc: SrcLoc, + msg: []const u8, + notes: []ErrorMsg = &.{}, + + pub fn create( + gpa: *Allocator, + src_loc: SrcLoc, + comptime format: []const u8, + args: anytype, + ) !*ErrorMsg { + const self = try gpa.create(ErrorMsg); + errdefer gpa.destroy(self); + self.* = try init(gpa, src_loc, format, args); + return self; + } + + /// Assumes the ErrorMsg struct and msg were both allocated with `gpa`, + /// as well as all notes. + pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void { + self.deinit(gpa); + gpa.destroy(self); + } + + pub fn init( + gpa: *Allocator, + src_loc: SrcLoc, + comptime format: []const u8, + args: anytype, + ) !ErrorMsg { + return ErrorMsg{ + .src_loc = src_loc, + .msg = try std.fmt.allocPrint(gpa, format, args), + }; + } + + pub fn deinit(self: *ErrorMsg, gpa: *Allocator) void { + for (self.notes) |*note| { + note.deinit(gpa); + } + gpa.free(self.notes); + gpa.free(self.msg); + self.* = undefined; + } +}; + +/// Canonical reference to a position within a source file. 
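+/// Pairing the file with the byte offset is what allows an error note to
+/// point into a different file than its parent error message.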
+pub const SrcLoc = struct { + file_scope: *Scope.File, + byte_offset: usize, +}; + pub const InnerError = error{ OutOfMemory, AnalysisFail }; pub fn deinit(self: *Module) void { const gpa = self.gpa; + self.compile_log_text.deinit(gpa); + self.zig_cache_artifact_directory.handle.close(); self.deletion_set.deinit(gpa); @@ -939,9 +828,6 @@ pub fn deinit(self: *Module) void { } self.failed_exports.deinit(gpa); - for (self.compile_log_decls.items()) |*entry| { - entry.value.deinit(gpa); - } self.compile_log_decls.deinit(gpa); for (self.decl_exports.items()) |entry| { @@ -965,7 +851,7 @@ pub fn deinit(self: *Module) void { self.global_error_set.deinit(gpa); for (self.import_table.items()) |entry| { - entry.value.base.destroy(gpa); + entry.value.destroy(gpa); } self.import_table.deinit(gpa); } @@ -978,7 +864,7 @@ fn freeExportList(gpa: *Allocator, export_list: []*Export) void { gpa.free(export_list); } -pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { +pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void { const tracy = trace(@src()); defer tracy.end(); @@ -999,7 +885,7 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. - self.deleteDeclExports(decl); + mod.deleteDeclExports(decl); // Dependencies will be re-discovered, so we remove them here prior to re-analysis. for (decl.dependencies.items()) |entry| { const dep = entry.key; @@ -1008,7 +894,7 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { // We don't perform a deletion here, because this Decl or another one // may end up referencing it before the update is complete. dep.deletion_flag = true; - try self.deletion_set.append(self.gpa, dep); + try mod.deletion_set.append(mod.gpa, dep); } } decl.dependencies.clearRetainingCapacity(); @@ -1019,24 +905,21 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { .unreferenced => false, }; - const type_changed = if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| - try zir_sema.analyzeZirDecl(self, decl, zir_module.contents.module.decls[decl.src_index]) - else - self.astGenAndAnalyzeDecl(decl) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => return error.AnalysisFail, - else => { - try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1); - self.failed_decls.putAssumeCapacityNoClobber(decl, try Compilation.ErrorMsg.create( - self.gpa, - decl.src(), - "unable to analyze: {s}", - .{@errorName(err)}, - )); - decl.analysis = .sema_failure_retryable; - return error.AnalysisFail; - }, - }; + const type_changed = mod.astGenAndAnalyzeDecl(decl) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => return error.AnalysisFail, + else => { + decl.analysis = .sema_failure_retryable; + try mod.failed_decls.ensureCapacity(mod.gpa, mod.failed_decls.items().len + 1); + mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( + mod.gpa, + decl.srcLoc(), + "unable to analyze: {s}", + .{@errorName(err)}, + )); + return error.AnalysisFail; + }, + }; if (subsequent_analysis) { // We may need to chase the dependants and re-analyze them. 
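
The ErrorMsg and SrcLoc definitions above are what carry error notes through
the pipeline. A minimal sketch of a callsite (hypothetical, for illustration:
`mod`, `decl`, `file`, `else_src`, and `underscore_src` are stand-in names,
and error cleanup is elided) could attach a note like this:

    const msg = try Module.ErrorMsg.create(mod.gpa, .{
        .file_scope = file,
        .byte_offset = else_src,
    }, "else prong is unreachable", .{});

    // Notes are plain ErrorMsg values owned by the parent message;
    // ErrorMsg.destroy frees the slice and each note's formatted text.
    msg.notes = try mod.gpa.alloc(Module.ErrorMsg, 1);
    msg.notes[0] = .{
        .src_loc = .{ .file_scope = file, .byte_offset = underscore_src },
        .msg = try std.fmt.allocPrint(mod.gpa, "previous '_' prong is here", .{}),
    };
    try mod.failed_decls.putNoClobber(mod.gpa, decl, msg);

Because each note carries its own SrcLoc, AllErrors.add can resolve every
note's line and column against the correct file when it converts to the
API-facing Message type.
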
@@ -1055,8 +938,8 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { .codegen_failure, .codegen_failure_retryable, .complete, - => if (dep.generation != self.generation) { - try self.markOutdatedDecl(dep); + => if (dep.generation != mod.generation) { + try mod.markOutdatedDecl(dep); }, } } @@ -1068,8 +951,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const tracy = trace(@src()); defer tracy.end(); - const container_scope = decl.scope.cast(Scope.Container).?; - const tree = try self.getAstTree(container_scope.file_scope); + const tree = try self.getAstTree(decl.container.file_scope); const ast_node = tree.root_node.decls()[decl.src_index]; switch (ast_node.tag) { .FnProto => { @@ -1085,7 +967,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { var fn_type_scope: Scope.GenZIR = .{ .decl = decl, .arena = &fn_type_scope_arena.allocator, - .parent = decl.scope, + .parent = &decl.container.base, }; defer fn_type_scope.instructions.deinit(self.gpa); @@ -1197,7 +1079,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .parent = null, .inst_table = &inst_table, .func = null, - .decl = decl, + .owner_decl = decl, + .src_decl = decl, .instructions = .{}, .arena = &decl_arena.allocator, .inlining = null, @@ -1242,12 +1125,12 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const new_func = try decl_arena.allocator.create(Fn); const fn_payload = try decl_arena.allocator.create(Value.Payload.Function); - const fn_zir: zir.Module.Body = blk: { + const fn_zir: zir.Body = blk: { // We put the ZIR inside the Decl arena. var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &decl_arena.allocator, - .parent = decl.scope, + .parent = &decl.container.base, }; defer gen_scope.instructions.deinit(self.gpa); @@ -1400,7 +1283,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .parent = null, .inst_table = &decl_inst_table, .func = null, - .decl = decl, + .owner_decl = decl, + .src_decl = decl, .instructions = .{}, .arena = &decl_arena.allocator, .inlining = null, @@ -1444,7 +1328,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &gen_scope_arena.allocator, - .parent = decl.scope, + .parent = &decl.container.base, }; defer gen_scope.instructions.deinit(self.gpa); @@ -1472,7 +1356,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .parent = null, .inst_table = &var_inst_table, .func = null, - .decl = decl, + .owner_decl = decl, + .src_decl = decl, .instructions = .{}, .arena = &gen_scope_arena.allocator, .inlining = null, @@ -1503,7 +1388,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { var type_scope: Scope.GenZIR = .{ .decl = decl, .arena = &type_scope_arena.allocator, - .parent = decl.scope, + .parent = &decl.container.base, }; defer type_scope.instructions.deinit(self.gpa); @@ -1584,7 +1469,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &analysis_arena.allocator, - .parent = decl.scope, + .parent = &decl.container.base, }; defer gen_scope.instructions.deinit(self.gpa); @@ -1602,7 +1487,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { .parent = null, .inst_table = &inst_table, .func = null, - .decl = decl, + .owner_decl = decl, + .src_decl = decl, .instructions = .{}, .arena = &analysis_arena.allocator, .inlining = null, @@ -1632,44 +1518,6 @@ fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void 
dependee.dependants.putAssumeCapacity(depender, {}); } -fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { - switch (root_scope.status) { - .never_loaded, .unloaded_success => { - try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1); - - const source = try root_scope.getSource(self); - - var keep_zir_module = false; - const zir_module = try self.gpa.create(zir.Module); - defer if (!keep_zir_module) self.gpa.destroy(zir_module); - - zir_module.* = try zir.parse(self.gpa, source); - defer if (!keep_zir_module) zir_module.deinit(self.gpa); - - if (zir_module.error_msg) |src_err_msg| { - self.failed_files.putAssumeCapacityNoClobber( - &root_scope.base, - try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{s}", .{src_err_msg.msg}), - ); - root_scope.status = .unloaded_parse_failure; - return error.AnalysisFail; - } - - root_scope.status = .loaded_success; - root_scope.contents = .{ .module = zir_module }; - keep_zir_module = true; - - return zir_module; - }, - - .unloaded_parse_failure, - .unloaded_sema_failure, - => return error.AnalysisFail, - - .loaded_success, .loaded_sema_failure => return root_scope.contents.module, - } -} - pub fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree { const tracy = trace(@src()); defer tracy.end(); @@ -1691,10 +1539,13 @@ pub fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree { defer msg.deinit(); try parse_err.render(tree.token_ids, msg.writer()); - const err_msg = try self.gpa.create(Compilation.ErrorMsg); + const err_msg = try self.gpa.create(ErrorMsg); err_msg.* = .{ + .src_loc = .{ + .file_scope = root_scope, + .byte_offset = tree.token_locs[parse_err.loc()].start, + }, .msg = msg.toOwnedSlice(), - .byte_offset = tree.token_locs[parse_err.loc()].start, }; self.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg); @@ -1753,9 +1604,12 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void decl.src_index = decl_i; if (deleted_decls.swapRemove(decl) == null) { decl.analysis = .sema_failure; - const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{s}'", .{decl.name}); - errdefer err_msg.destroy(self.gpa); - try self.failed_decls.putNoClobber(self.gpa, decl, err_msg); + const msg = try ErrorMsg.create(self.gpa, .{ + .file_scope = container_scope.file_scope, + .byte_offset = tree.token_locs[name_tok].start, + }, "redefinition of '{s}'", .{decl.name}); + errdefer msg.destroy(self.gpa); + try self.failed_decls.putNoClobber(self.gpa, decl, msg); } else { if (!srcHashEql(decl.contents_hash, contents_hash)) { try self.markOutdatedDecl(decl); @@ -1795,7 +1649,10 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void decl.src_index = decl_i; if (deleted_decls.swapRemove(decl) == null) { decl.analysis = .sema_failure; - const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{s}'", .{decl.name}); + const err_msg = try ErrorMsg.create(self.gpa, .{ + .file_scope = container_scope.file_scope, + .byte_offset = name_loc.start, + }, "redefinition of '{s}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); try self.failed_decls.putNoClobber(self.gpa, decl, err_msg); } else if (!srcHashEql(decl.contents_hash, contents_hash)) { @@ -1840,65 +1697,12 @@ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void } } -pub fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void { - // 
We may be analyzing it for the first time, or this may be - // an incremental update. This code handles both cases. - const src_module = try self.getSrcModule(root_scope); - - try self.comp.work_queue.ensureUnusedCapacity(src_module.decls.len); - try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len); - - var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa); - defer exports_to_resolve.deinit(); - - // Keep track of the decls that we expect to see in this file so that - // we know which ones have been deleted. - var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa); - defer deleted_decls.deinit(); - try deleted_decls.ensureCapacity(self.decl_table.items().len); - for (self.decl_table.items()) |entry| { - deleted_decls.putAssumeCapacityNoClobber(entry.value, {}); - } - - for (src_module.decls) |src_decl, decl_i| { - const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name); - if (self.decl_table.get(name_hash)) |decl| { - deleted_decls.removeAssertDiscard(decl); - if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) { - try self.markOutdatedDecl(decl); - decl.contents_hash = src_decl.contents_hash; - } - } else { - const new_decl = try self.createNewDecl( - &root_scope.base, - src_decl.name, - decl_i, - name_hash, - src_decl.contents_hash, - ); - root_scope.decls.appendAssumeCapacity(new_decl); - if (src_decl.inst.cast(zir.Inst.Export)) |export_inst| { - try exports_to_resolve.append(src_decl); - } - } - } - for (exports_to_resolve.items) |export_decl| { - _ = try zir_sema.resolveZirDecl(self, &root_scope.base, export_decl); - } - // Handle explicitly deleted decls from the source code. Not to be confused - // with when we delete decls because they are no longer referenced. - for (deleted_decls.items()) |entry| { - log.debug("noticed '{s}' deleted from source\n", .{entry.key.name}); - try self.deleteDecl(entry.key); - } -} - pub fn deleteDecl(self: *Module, decl: *Decl) !void { try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len); // Remove from the namespace it resides in. In the case of an anonymous Decl it will // not be present in the set, and this does nothing. 
- decl.scope.removeDecl(decl); + decl.container.removeDecl(decl); log.debug("deleting decl '{s}'\n", .{decl.name}); const name_hash = decl.fullyQualifiedNameHash(); @@ -1929,9 +1733,7 @@ pub fn deleteDecl(self: *Module, decl: *Decl) !void { if (self.emit_h_failed_decls.swapRemove(decl)) |entry| { entry.value.destroy(self.gpa); } - if (self.compile_log_decls.swapRemove(decl)) |*entry| { - entry.value.deinit(self.gpa); - } + _ = self.compile_log_decls.swapRemove(decl); self.deleteDeclExports(decl); self.comp.bin_file.freeDecl(decl); @@ -1993,7 +1795,8 @@ pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void { .parent = null, .inst_table = &inst_table, .func = func, - .decl = decl, + .owner_decl = decl, + .src_decl = decl, .instructions = .{}, .arena = &arena.allocator, .inlining = null, @@ -2022,9 +1825,7 @@ fn markOutdatedDecl(self: *Module, decl: *Decl) !void { if (self.emit_h_failed_decls.swapRemove(decl)) |entry| { entry.value.destroy(self.gpa); } - if (self.compile_log_decls.swapRemove(decl)) |*entry| { - entry.value.deinit(self.gpa); - } + _ = self.compile_log_decls.swapRemove(decl); decl.analysis = .outdated; } @@ -2046,7 +1847,7 @@ fn allocateNewDecl( new_decl.* = .{ .name = "", - .scope = scope.namespace(), + .container = scope.namespace(), .src_index = src_index, .typed_value = .{ .never_succeeded = {} }, .analysis = .unreferenced, @@ -2129,34 +1930,34 @@ pub fn resolveDefinedValue(self: *Module, scope: *Scope, base: *Inst) !?Value { } pub fn analyzeExport( - self: *Module, + mod: *Module, scope: *Scope, src: usize, borrowed_symbol_name: []const u8, exported_decl: *Decl, ) !void { - try self.ensureDeclAnalyzed(exported_decl); + try mod.ensureDeclAnalyzed(exported_decl); const typed_value = exported_decl.typed_value.most_recent.typed_value; switch (typed_value.ty.zigTypeTag()) { .Fn => {}, - else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}), + else => return mod.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}), } - try self.decl_exports.ensureCapacity(self.gpa, self.decl_exports.items().len + 1); - try self.export_owners.ensureCapacity(self.gpa, self.export_owners.items().len + 1); + try mod.decl_exports.ensureCapacity(mod.gpa, mod.decl_exports.items().len + 1); + try mod.export_owners.ensureCapacity(mod.gpa, mod.export_owners.items().len + 1); - const new_export = try self.gpa.create(Export); - errdefer self.gpa.destroy(new_export); + const new_export = try mod.gpa.create(Export); + errdefer mod.gpa.destroy(new_export); - const symbol_name = try self.gpa.dupe(u8, borrowed_symbol_name); - errdefer self.gpa.free(symbol_name); + const symbol_name = try mod.gpa.dupe(u8, borrowed_symbol_name); + errdefer mod.gpa.free(symbol_name); - const owner_decl = scope.decl().?; + const owner_decl = scope.ownerDecl().?; new_export.* = .{ .options = .{ .name = symbol_name }, .src = src, - .link = switch (self.comp.bin_file.tag) { + .link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = {} }, .elf => .{ .elf = link.File.Elf.Export{} }, .macho => .{ .macho = link.File.MachO.Export{} }, @@ -2169,48 +1970,53 @@ pub fn analyzeExport( }; // Add to export_owners table. 
- const eo_gop = self.export_owners.getOrPutAssumeCapacity(owner_decl); + const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl); if (!eo_gop.found_existing) { eo_gop.entry.value = &[0]*Export{}; } - eo_gop.entry.value = try self.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1); + eo_gop.entry.value = try mod.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1); eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export; - errdefer eo_gop.entry.value = self.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1); + errdefer eo_gop.entry.value = mod.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1); // Add to exported_decl table. - const de_gop = self.decl_exports.getOrPutAssumeCapacity(exported_decl); + const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl); if (!de_gop.found_existing) { de_gop.entry.value = &[0]*Export{}; } - de_gop.entry.value = try self.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1); + de_gop.entry.value = try mod.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1); de_gop.entry.value[de_gop.entry.value.len - 1] = new_export; - errdefer de_gop.entry.value = self.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1); + errdefer de_gop.entry.value = mod.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1); - if (self.symbol_exports.get(symbol_name)) |_| { - try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1); - self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create( - self.gpa, + if (mod.symbol_exports.get(symbol_name)) |other_export| { + new_export.status = .failed_retryable; + try mod.failed_exports.ensureCapacity(mod.gpa, mod.failed_exports.items().len + 1); + const msg = try mod.errMsg( + scope, src, "exported symbol collision: {s}", .{symbol_name}, - )); - // TODO: add a note + ); + errdefer msg.destroy(mod.gpa); + try mod.errNote( + &other_export.owner_decl.container.base, + other_export.src, + msg, + "other symbol here", + .{}, + ); + mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg); new_export.status = .failed; return; } - try self.symbol_exports.putNoClobber(self.gpa, symbol_name, new_export); - self.comp.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) { + try mod.symbol_exports.putNoClobber(mod.gpa, symbol_name, new_export); + mod.comp.bin_file.updateDeclExports(mod, exported_decl, de_gop.entry.value) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1); - self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create( - self.gpa, - src, - "unable to export: {s}", - .{@errorName(err)}, - )); new_export.status = .failed_retryable; + try mod.failed_exports.ensureCapacity(mod.gpa, mod.failed_exports.items().len + 1); + const msg = try mod.errMsg(scope, src, "unable to export: {s}", .{@errorName(err)}); + mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg); }, }; } @@ -2476,7 +2282,7 @@ pub fn createAnonymousDecl( typed_value: TypedValue, ) !*Decl { const name_index = self.getNextAnonNameIndex(); - const scope_decl = scope.decl().?; + const scope_decl = scope.ownerDecl().?; const name = try std.fmt.allocPrint(self.gpa, "{s}__anon_{d}", .{ scope_decl.name, name_index }); defer self.gpa.free(name); const name_hash = scope.namespace().fullyQualifiedNameHash(name); @@ -2512,7 +2318,7 @@ pub fn 
createContainerDecl( decl_arena: *std.heap.ArenaAllocator, typed_value: TypedValue, ) !*Decl { - const scope_decl = scope.decl().?; + const scope_decl = scope.ownerDecl().?; const name = try self.getAnonTypeName(scope, base_token); defer self.gpa.free(name); const name_hash = scope.namespace().fullyQualifiedNameHash(name); @@ -2558,14 +2364,14 @@ pub fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*De } pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst { - const scope_decl = scope.decl().?; + const scope_decl = scope.ownerDecl().?; try self.declareDeclDependency(scope_decl, decl); self.ensureDeclAnalyzed(decl) catch |err| { if (scope.cast(Scope.Block)) |block| { if (block.func) |func| { func.state = .dependency_failure; } else { - block.decl.analysis = .dependency_failure; + block.owner_decl.analysis = .dependency_failure; } } else { scope_decl.analysis = .dependency_failure; @@ -3217,10 +3023,51 @@ fn coerceArrayPtrToMany(self: *Module, scope: *Scope, dest_type: Type, inst: *In return self.fail(scope, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -pub fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError { - @setCold(true); - const err_msg = try Compilation.ErrorMsg.create(self.gpa, src, format, args); - return self.failWithOwnedErrorMsg(scope, src, err_msg); +/// We don't return a pointer to the new error note because the pointer +/// becomes invalid when you add another one. +pub fn errNote( + mod: *Module, + scope: *Scope, + src: usize, + parent: *ErrorMsg, + comptime format: []const u8, + args: anytype, +) error{OutOfMemory}!void { + const msg = try std.fmt.allocPrint(mod.gpa, format, args); + errdefer mod.gpa.free(msg); + + parent.notes = try mod.gpa.realloc(parent.notes, parent.notes.len + 1); + parent.notes[parent.notes.len - 1] = .{ + .src_loc = .{ + .file_scope = scope.getFileScope(), + .byte_offset = src, + }, + .msg = msg, + }; +} + +pub fn errMsg( + mod: *Module, + scope: *Scope, + src_byte_offset: usize, + comptime format: []const u8, + args: anytype, +) error{OutOfMemory}!*ErrorMsg { + return ErrorMsg.create(mod.gpa, .{ + .file_scope = scope.getFileScope(), + .byte_offset = src_byte_offset, + }, format, args); +} + +pub fn fail( + mod: *Module, + scope: *Scope, + src_byte_offset: usize, + comptime format: []const u8, + args: anytype, +) InnerError { + const err_msg = try mod.errMsg(scope, src_byte_offset, format, args); + return mod.failWithOwnedErrorMsg(scope, err_msg); } pub fn failTok( @@ -3230,7 +3077,6 @@ pub fn failTok( comptime format: []const u8, args: anytype, ) InnerError { - @setCold(true); const src = scope.tree().token_locs[token_index].start; return self.fail(scope, src, format, args); } @@ -3242,80 +3088,36 @@ pub fn failNode( comptime format: []const u8, args: anytype, ) InnerError { - @setCold(true); const src = scope.tree().token_locs[ast_node.firstToken()].start; return self.fail(scope, src, format, args); } -fn addCompileLog(self: *Module, decl: *Decl, src: usize) error{OutOfMemory}!void { - const entry = try self.compile_log_decls.getOrPutValue(self.gpa, decl, .{}); - try entry.value.append(self.gpa, src); -} - -pub fn failCompileLog( - self: *Module, - scope: *Scope, - src: usize, -) InnerError!void { - switch (scope.tag) { - .decl => { - const decl = scope.cast(Scope.DeclAnalysis).?.decl; - try self.addCompileLog(decl, src); - }, - .block => { - const block = scope.cast(Scope.Block).?; - try 
self.addCompileLog(block.decl, src); - }, - .gen_zir => { - const gen_zir = scope.cast(Scope.GenZIR).?; - try self.addCompileLog(gen_zir.decl, src); - }, - .local_val => { - const gen_zir = scope.cast(Scope.LocalVal).?.gen_zir; - try self.addCompileLog(gen_zir.decl, src); - }, - .local_ptr => { - const gen_zir = scope.cast(Scope.LocalPtr).?.gen_zir; - try self.addCompileLog(gen_zir.decl, src); - }, - .zir_module, - .file, - .container, - => unreachable, - } -} - -fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Compilation.ErrorMsg) InnerError { +pub fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, err_msg: *ErrorMsg) InnerError { + @setCold(true); { errdefer err_msg.destroy(self.gpa); try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1); try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1); } switch (scope.tag) { - .decl => { - const decl = scope.cast(Scope.DeclAnalysis).?.decl; - decl.analysis = .sema_failure; - decl.generation = self.generation; - self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg); - }, .block => { const block = scope.cast(Scope.Block).?; if (block.inlining) |inlining| { if (inlining.shared.caller) |func| { func.state = .sema_failure; } else { - block.decl.analysis = .sema_failure; - block.decl.generation = self.generation; + block.owner_decl.analysis = .sema_failure; + block.owner_decl.generation = self.generation; } } else { if (block.func) |func| { func.state = .sema_failure; } else { - block.decl.analysis = .sema_failure; - block.decl.generation = self.generation; + block.owner_decl.analysis = .sema_failure; + block.owner_decl.generation = self.generation; } } - self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg); + self.failed_decls.putAssumeCapacityNoClobber(block.owner_decl, err_msg); }, .gen_zir => { const gen_zir = scope.cast(Scope.GenZIR).?; @@ -3335,11 +3137,6 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Com gen_zir.decl.generation = self.generation; self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg); }, - .zir_module => { - const zir_module = scope.cast(Scope.ZIRModule).?; - zir_module.status = .loaded_sema_failure; - self.failed_files.putAssumeCapacityNoClobber(scope, err_msg); - }, .file => unreachable, .container => unreachable, } @@ -3671,7 +3468,8 @@ pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, .inlining = parent_block.inlining, diff --git a/src/astgen.zig b/src/astgen.zig index 53503a0467..4631e46b5d 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -318,7 +318,7 @@ pub fn comptimeExpr(mod: *Module, parent_scope: *Scope, rl: ResultLoc, node: *as // Make a scope to collect generated instructions in the sub-expression. 
var block_scope: Scope.GenZIR = .{ .parent = parent_scope, - .decl = parent_scope.decl().?, + .decl = parent_scope.ownerDecl().?, .arena = parent_scope.arena(), .instructions = .{}, }; @@ -474,7 +474,7 @@ fn labeledBlockExpr( var block_scope: Scope.GenZIR = .{ .parent = parent_scope, - .decl = parent_scope.decl().?, + .decl = parent_scope.ownerDecl().?, .arena = gen_zir.arena, .instructions = .{}, .break_result_loc = rl, @@ -899,7 +899,7 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con var gen_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1028,7 +1028,13 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con .ty = Type.initTag(.type), .val = val, }); - return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclValInModule, .{ .decl = decl }, .{})); + if (rl == .ref) { + return addZIRInst(mod, scope, src, zir.Inst.DeclRef, .{ .decl = decl }, .{}); + } else { + return rlWrap(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclVal, .{ + .decl = decl, + }, .{})); + } } fn errorSetDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.ErrorSetDecl) InnerError!*zir.Inst { @@ -1084,7 +1090,7 @@ fn orelseCatchExpr( var block_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1266,7 +1272,7 @@ fn boolBinOp( var block_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1412,7 +1418,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn } var block_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1513,7 +1519,7 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W var expr_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1649,7 +1655,7 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For) var for_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1843,7 +1849,7 @@ fn getRangeNode(node: *ast.Node) ?*ast.Node.SimpleInfixOp { fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node.Switch) InnerError!*zir.Inst { var block_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1885,7 +1891,7 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node var item_scope: Scope.GenZIR = .{ .parent = scope, - .decl = scope.decl().?, + .decl = scope.ownerDecl().?, .arena = scope.arena(), .instructions = .{}, }; @@ -1922,8 +1928,15 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node // Check for else/_ prong, those are handled last. 
if (case.items_len == 1 and case.items()[0].tag == .SwitchElse) { if (else_src) |src| { - return mod.fail(scope, case_src, "multiple else prongs in switch expression", .{}); - // TODO notes "previous else prong is here" + const msg = try mod.errMsg( + scope, + case_src, + "multiple else prongs in switch expression", + .{}, + ); + errdefer msg.destroy(mod.gpa); + try mod.errNote(scope, src, msg, "previous else prong is here", .{}); + return mod.failWithOwnedErrorMsg(scope, msg); } else_src = case_src; special_case = case; @@ -1932,8 +1945,15 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node mem.eql(u8, tree.tokenSlice(case.items()[0].firstToken()), "_")) { if (underscore_src) |src| { - return mod.fail(scope, case_src, "multiple '_' prongs in switch expression", .{}); - // TODO notes "previous '_' prong is here" + const msg = try mod.errMsg( + scope, + case_src, + "multiple '_' prongs in switch expression", + .{}, + ); + errdefer msg.destroy(mod.gpa); + try mod.errNote(scope, src, msg, "previous '_' prong is here", .{}); + return mod.failWithOwnedErrorMsg(scope, msg); } underscore_src = case_src; special_case = case; @@ -1942,9 +1962,16 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node if (else_src) |some_else| { if (underscore_src) |some_underscore| { - return mod.fail(scope, switch_src, "else and '_' prong in switch expression", .{}); - // TODO notes "else prong is here" - // TODO notes "'_' prong is here" + const msg = try mod.errMsg( + scope, + switch_src, + "else and '_' prong in switch expression", + .{}, + ); + errdefer msg.destroy(mod.gpa); + try mod.errNote(scope, some_else, msg, "else prong is here", .{}); + try mod.errNote(scope, some_underscore, msg, "'_' prong is here", .{}); + return mod.failWithOwnedErrorMsg(scope, msg); } } @@ -2162,7 +2189,13 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo } if (mod.lookupDeclName(scope, ident_name)) |decl| { - return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclValInModule, .{ .decl = decl }, .{})); + if (rl == .ref) { + return addZIRInst(mod, scope, src, zir.Inst.DeclRef, .{ .decl = decl }, .{}); + } else { + return rlWrap(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclVal, .{ + .decl = decl, + }, .{})); + } } return mod.failNode(scope, &ident.base, "use of undeclared identifier '{s}'", .{ident_name}); @@ -2927,6 +2960,8 @@ fn rlWrapVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, resul return rlWrap(mod, scope, rl, void_inst); } +/// TODO go over all the callsites and see where we can introduce "by-value" ZIR instructions +/// to save ZIR memory. For example, see DeclVal vs DeclRef. fn rlWrapPtr(mod: *Module, scope: *Scope, rl: ResultLoc, ptr: *zir.Inst) InnerError!*zir.Inst { if (rl == .ref) return ptr; @@ -3032,7 +3067,7 @@ pub fn addZIRInstBlock( scope: *Scope, src: usize, tag: zir.Inst.Tag, - body: zir.Module.Body, + body: zir.Body, ) !*zir.Inst.Block { const gen_zir = scope.getGenZIR(); try gen_zir.instructions.ensureCapacity(mod.gpa, gen_zir.instructions.items.len + 1); @@ -3070,7 +3105,7 @@ pub fn addZIRInstConst(mod: *Module, scope: *Scope, src: usize, typed_value: Typ } /// TODO The existence of this function is a workaround for a bug in stage1. 
-pub fn addZIRInstLoop(mod: *Module, scope: *Scope, src: usize, body: zir.Module.Body) !*zir.Inst.Loop { +pub fn addZIRInstLoop(mod: *Module, scope: *Scope, src: usize, body: zir.Body) !*zir.Inst.Loop { const P = std.meta.fieldInfo(zir.Inst.Loop, .positionals).field_type; return addZIRInstSpecial(mod, scope, src, zir.Inst.Loop, P{ .body = body }, .{}); } diff --git a/src/codegen.zig b/src/codegen.zig index 709c91a635..9f2fbaab78 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -9,7 +9,7 @@ const TypedValue = @import("TypedValue.zig"); const link = @import("link.zig"); const Module = @import("Module.zig"); const Compilation = @import("Compilation.zig"); -const ErrorMsg = Compilation.ErrorMsg; +const ErrorMsg = Module.ErrorMsg; const Target = std.Target; const Allocator = mem.Allocator; const trace = @import("tracy.zig").trace; @@ -74,7 +74,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateSymbol( bin_file: *link.File, - src: usize, + src_loc: Module.SrcLoc, typed_value: TypedValue, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -87,56 +87,56 @@ pub fn generateSymbol( switch (bin_file.options.target.cpu.arch) { .wasm32 => unreachable, // has its own code path .wasm64 => unreachable, // has its own code path - .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, debug_output), - .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, debug_output), - .aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, debug_output), - .aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, debug_output), - .aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, debug_output), - .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, debug_output), - 
//.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, debug_output), - .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, debug_output), - .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, debug_output), - //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, debug_output), + .arm => return Function(.arm).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .armeb => return Function(.armeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .aarch64 => return Function(.aarch64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.arc => return Function(.arc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.avr => 
return Function(.avr).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.mips => return Function(.mips).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.mips64 => return Function(.mips64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.msp430 => return Function(.msp430).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.r600 => return Function(.r600).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.sparc => return Function(.sparc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.s390x => return Function(.s390x).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.tce => return Function(.tce).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.tcele => return Function(.tcele).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.thumb => return Function(.thumb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.i386 => return Function(.i386).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.xcore => return Function(.xcore).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.le32 => return Function(.le32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.le64 => return Function(.le64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.amdil => return Function(.amdil).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.amdil64 => return 
Function(.amdil64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.hsail => return Function(.hsail).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.spir => return Function(.spir).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.spir64 => return Function(.spir64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.shave => return Function(.shave).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.lanai => return Function(.lanai).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), + //.ve => return Function(.ve).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."), } }, @@ -147,7 +147,7 @@ pub fn generateSymbol( try code.ensureCapacity(code.items.len + payload.data.len + 1); code.appendSliceAssumeCapacity(payload.data); const prev_len = code.items.len; - switch (try generateSymbol(bin_file, src, .{ + switch (try generateSymbol(bin_file, src_loc, .{ .ty = typed_value.ty.elemType(), .val = sentinel, }, code, debug_output)) { @@ -165,7 +165,7 @@ pub fn generateSymbol( return Result{ .fail = try ErrorMsg.create( bin_file.allocator, - src, + src_loc, "TODO implement generateSymbol for more kinds of arrays", .{}, ), @@ -200,7 +200,7 @@ pub fn generateSymbol( return Result{ .fail = try ErrorMsg.create( bin_file.allocator, - src, + src_loc, "TODO implement generateSymbol for pointer {}", .{typed_value.val}, ), @@ -217,7 +217,7 @@ pub fn generateSymbol( return Result{ .fail = try ErrorMsg.create( bin_file.allocator, - src, + src_loc, "TODO implement generateSymbol for int type '{}'", .{typed_value.ty}, ), @@ -227,7 +227,7 @@ pub fn generateSymbol( return Result{ .fail = try ErrorMsg.create( bin_file.allocator, - src, + src_loc, "TODO implement generateSymbol for type '{s}'", .{@tagName(t)}, ), @@ -259,7 +259,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { ret_mcv: MCValue, fn_type: Type, arg_index: usize, - src: usize, + src_loc: Module.SrcLoc, stack_align: u32, /// Byte offset within the source file. 
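For readers tracking the src -> src_loc change through this file: the new plumbing is small enough to sketch in isolation. What follows is a minimal, self-contained Zig sketch of the pattern, not the compiler's real definitions; FileScope here is a hypothetical stand-in, and the actual SrcLoc and ErrorMsg live in src/Module.zig and carry more state. The point is that every error now records which file it belongs to, instead of a bare byte offset.

const std = @import("std");
const Allocator = std.mem.Allocator;

// Hypothetical stand-in for Module.Scope.File; the real SrcLoc points at one.
const FileScope = struct { sub_file_path: []const u8 };

const SrcLoc = struct {
    file_scope: *FileScope,
    byte_offset: usize,
};

const ErrorMsg = struct {
    src_loc: SrcLoc,
    msg: []const u8,

    fn create(gpa: *Allocator, src_loc: SrcLoc, comptime format: []const u8, args: anytype) !*ErrorMsg {
        const self = try gpa.create(ErrorMsg);
        errdefer gpa.destroy(self);
        self.* = .{
            .src_loc = src_loc,
            .msg = try std.fmt.allocPrint(gpa, format, args),
        };
        return self;
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    const gpa = &gpa_state.allocator;

    var file = FileScope{ .sub_file_path = "example.zig" };
    // Like Function.fail above: reuse the file from the stored src_loc and
    // supply a fresh byte offset for the failing instruction.
    const err = try ErrorMsg.create(gpa, .{
        .file_scope = &file,
        .byte_offset = 42,
    }, "TODO implement {s}", .{"this instruction"});
    std.debug.print("{s}:{d}: error: {s}\n", .{
        err.src_loc.file_scope.sub_file_path,
        err.src_loc.byte_offset,
        err.msg,
    });
}
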
@@ -428,7 +428,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn generateSymbol( bin_file: *link.File, - src: usize, + src_loc: Module.SrcLoc, typed_value: TypedValue, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -450,19 +450,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try branch_stack.append(.{}); const src_data: struct { lbrace_src: usize, rbrace_src: usize, source: []const u8 } = blk: { - if (module_fn.owner_decl.scope.cast(Module.Scope.Container)) |container_scope| { - const tree = container_scope.file_scope.contents.tree; - const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const lbrace_src = tree.token_locs[block.lbrace].start; - const rbrace_src = tree.token_locs[block.rbrace].start; - break :blk .{ .lbrace_src = lbrace_src, .rbrace_src = rbrace_src, .source = tree.source }; - } else if (module_fn.owner_decl.scope.cast(Module.Scope.ZIRModule)) |zir_module| { - const byte_off = zir_module.contents.module.decls[module_fn.owner_decl.src_index].inst.src; - break :blk .{ .lbrace_src = byte_off, .rbrace_src = byte_off, .source = zir_module.source.bytes }; - } else { - unreachable; - } + const container_scope = module_fn.owner_decl.container; + const tree = container_scope.file_scope.contents.tree; + const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?; + const block = fn_proto.getBodyNode().?.castTag(.Block).?; + const lbrace_src = tree.token_locs[block.lbrace].start; + const rbrace_src = tree.token_locs[block.rbrace].start; + break :blk .{ + .lbrace_src = lbrace_src, + .rbrace_src = rbrace_src, + .source = tree.source, + }; }; var function = Self{ @@ -478,7 +476,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .fn_type = fn_type, .arg_index = 0, .branch_stack = &branch_stack, - .src = src, + .src_loc = src_loc, .stack_align = undefined, .prev_di_pc = 0, .prev_di_src = src_data.lbrace_src, @@ -489,7 +487,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.stack.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(src, fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(src_loc.byte_offset, fn_type) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? 
}, else => |e| return e, }; @@ -536,12 +534,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) - return self.fail(self.src, "too much stack used in call parameters", .{}); + return self.failSymbol("too much stack used in call parameters", .{}); const aligned_stack_end = mem.alignForward(stack_end, self.stack_align); mem.writeIntLittle(u32, self.code.items[reloc_index..][0..4], @intCast(u32, aligned_stack_end)); if (self.code.items.len >= math.maxInt(i32)) { - return self.fail(self.src, "unable to perform relocation: jump too far", .{}); + return self.failSymbol("unable to perform relocation: jump too far", .{}); } if (self.exitlude_jump_relocs.items.len == 1) { self.code.items.len -= 5; @@ -598,7 +596,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (Instruction.Operand.fromU32(@intCast(u32, aligned_stack_end))) |op| { writeInt(u32, self.code.items[backpatch_reloc..][0..4], Instruction.sub(.al, .sp, .sp, op).toU32()); } else { - return self.fail(self.src, "TODO ARM: allow larger stacks", .{}); + return self.failSymbol("TODO ARM: allow larger stacks", .{}); } try self.dbgSetEpilogueBegin(); @@ -624,7 +622,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (math.cast(i26, amt)) |offset| { writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(.al, offset).toU32()); } else |err| { - return self.fail(self.src, "exitlude jump is too large", .{}); + return self.failSymbol("exitlude jump is too large", .{}); } } } @@ -3678,7 +3676,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn fail(self: *Self, src: usize, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src, format, args); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, .{ + .file_scope = self.src_loc.file_scope, + .byte_offset = src, + }, format, args); + return error.CodegenFail; + } + + fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError { + @setCold(true); + assert(self.err_msg == null); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8c85f482fd..b26f753757 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -114,10 +114,13 @@ pub const DeclGen = struct { module: *Module, decl: *Decl, fwd_decl: std.ArrayList(u8), - error_msg: ?*Compilation.ErrorMsg, + error_msg: ?*Module.ErrorMsg, fn fail(dg: *DeclGen, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { - dg.error_msg = try Compilation.ErrorMsg.create(dg.module.gpa, src, format, args); + dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, .{ + .file_scope = dg.decl.getFileScope(), + .byte_offset = src, + }, format, args); return error.AnalysisFail; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5d753c41cb..1edd466d54 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -148,7 +148,7 @@ pub const LLVMIRModule = struct { object_path: []const u8, gpa: *Allocator, - err_msg: ?*Compilation.ErrorMsg = null, + err_msg: ?*Module.ErrorMsg = null, // TODO: The fields below should really move into a different struct, // because they are only valid when generating a function @@ -177,6 +177,8 @@ pub const LLVMIRModule = struct { break_vals: *BreakValues, }) = .{}, + src_loc: Module.SrcLoc, + const 
BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock); const BreakValues = std.ArrayListUnmanaged(*const llvm.Value); @@ -254,6 +256,8 @@ pub const LLVMIRModule = struct { .builder = builder, .object_path = object_path, .gpa = gpa, + // TODO move this field into a struct that is only instantiated per gen() call + .src_loc = undefined, }; return self; } @@ -335,6 +339,8 @@ pub const LLVMIRModule = struct { const typed_value = decl.typed_value.most_recent.typed_value; const src = decl.src(); + self.src_loc = decl.srcLoc(); + log.debug("gen: {s} type: {}, value: {}", .{ decl.name, typed_value.ty, typed_value.val }); if (typed_value.val.castTag(.function)) |func_payload| { @@ -853,7 +859,10 @@ pub const LLVMIRModule = struct { pub fn fail(self: *LLVMIRModule, src: usize, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @setCold(true); assert(self.err_msg == null); - self.err_msg = try Compilation.ErrorMsg.create(self.gpa, src, format, args); + self.err_msg = try Module.ErrorMsg.create(self.gpa, .{ + .file_scope = self.src_loc.file_scope, + .byte_offset = src, + }, format, args); return error.CodegenFail; } }; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f7cd9b69ce..981d4ec3a3 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -670,7 +670,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none); + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none); const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, @@ -732,7 +732,7 @@ pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, - try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}), ); continue; } @@ -743,7 +743,7 @@ pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, - try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: Exports other than '_start'", .{}), ); continue; } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 8c76a4e967..ee50eb5d94 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2189,22 +2189,14 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { try dbg_line_buffer.ensureCapacity(26); const line_off: u28 = blk: { - if (decl.scope.cast(Module.Scope.Container)) |container_scope| { - const tree = container_scope.file_scope.contents.tree; - const file_ast_decls = tree.root_node.decls(); - // TODO Look into improving the performance here by adding a token-index-to-line - // lookup table. Currently this involves scanning over the source code for newlines. 
- const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); - break :blk @intCast(u28, line_delta); - } else if (decl.scope.cast(Module.Scope.ZIRModule)) |zir_module| { - const byte_off = zir_module.contents.module.decls[decl.src_index].inst.src; - const line_delta = std.zig.lineDelta(zir_module.source.bytes, 0, byte_off); - break :blk @intCast(u28, line_delta); - } else { - unreachable; - } + const tree = decl.container.file_scope.contents.tree; + const file_ast_decls = tree.root_node.decls(); + // TODO Look into improving the performance here by adding a token-index-to-line + // lookup table. Currently this involves scanning over the source code for newlines. + const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; + const block = fn_proto.getBodyNode().?.castTag(.Block).?; + const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); + break :blk @intCast(u28, line_delta); }; const ptr_width_bytes = self.ptrWidthBytes(); @@ -2268,7 +2260,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { } else { // TODO implement .debug_info for global variables } - const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{ .dwarf = .{ .dbg_line = &dbg_line_buffer, .dbg_info = &dbg_info_buffer, @@ -2642,7 +2634,7 @@ pub fn updateDeclExports( try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, - try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}), ); continue; } @@ -2660,7 +2652,7 @@ pub fn updateDeclExports( try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, - try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}), ); continue; }, @@ -2703,8 +2695,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec if (self.llvm_ir_module) |_| return; - const container_scope = decl.scope.cast(Module.Scope.Container).?; - const tree = container_scope.file_scope.contents.tree; + const tree = decl.container.file_scope.contents.tree; const file_ast_decls = tree.root_node.decls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. 
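Both debug-info writers (the Mach-O counterpart follows below) now reduce the line_off computation to a single std.zig.lineDelta call, since decl.container makes the parsed tree directly reachable without the old two-way scope branch. As a small stand-alone check of the behavior those blocks depend on, here is a sketch with an invented source string; lineDelta simply counts the newlines between two byte offsets, which is how a declaration's starting line is derived without a token-to-line table.

const std = @import("std");

pub fn main() void {
    const source =
        \\const a = 1;
        \\
        \\fn f() void {}
        \\
    ;
    // Byte offset of the fn body's lbrace, standing in for
    // tree.token_locs[block.lbrace].start.
    const lbrace = std.mem.indexOfScalar(u8, source, '{').?;
    // Two newlines precede it, so the function starts on line 2 (0-based),
    // which is the value the line_off blocks above would store.
    const line_delta = std.zig.lineDelta(source, 0, lbrace);
    std.debug.assert(line_delta == 2);
    std.debug.print("line_off = {d}\n", .{line_delta});
}
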
diff --git a/src/link/MachO.zig b/src/link/MachO.zig index d913a82328..1017405255 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1148,7 +1148,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } const res = if (debug_buffers) |*dbg| - try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{ .dwarf = .{ .dbg_line = &dbg.dbg_line_buffer, .dbg_info = &dbg.dbg_info_buffer, @@ -1156,7 +1156,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { }, }) else - try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none); + try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none); const code = switch (res) { .externally_managed => |x| x, @@ -1316,7 +1316,7 @@ pub fn updateDeclExports( try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, - try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}), ); continue; } @@ -1334,7 +1334,7 @@ pub fn updateDeclExports( try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, - try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}), ); continue; }, diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 81a016ce42..fb7488a12c 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -906,8 +906,7 @@ pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const M const tracy = trace(@src()); defer tracy.end(); - const container_scope = decl.scope.cast(Module.Scope.Container).?; - const tree = container_scope.file_scope.contents.tree; + const tree = decl.container.file_scope.contents.tree; const file_ast_decls = tree.root_node.decls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. @@ -951,22 +950,14 @@ pub fn initDeclDebugBuffers( try dbg_line_buffer.ensureCapacity(26); const line_off: u28 = blk: { - if (decl.scope.cast(Module.Scope.Container)) |container_scope| { - const tree = container_scope.file_scope.contents.tree; - const file_ast_decls = tree.root_node.decls(); - // TODO Look into improving the performance here by adding a token-index-to-line - // lookup table. Currently this involves scanning over the source code for newlines. 
- const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); - break :blk @intCast(u28, line_delta); - } else if (decl.scope.cast(Module.Scope.ZIRModule)) |zir_module| { - const byte_off = zir_module.contents.module.decls[decl.src_index].inst.src; - const line_delta = std.zig.lineDelta(zir_module.source.bytes, 0, byte_off); - break :blk @intCast(u28, line_delta); - } else { - unreachable; - } + const tree = decl.container.file_scope.contents.tree; + const file_ast_decls = tree.root_node.decls(); + // TODO Look into improving the performance here by adding a token-index-to-line + // lookup table. Currently this involves scanning over the source code for newlines. + const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; + const block = fn_proto.getBodyNode().?.castTag(.Block).?; + const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); + break :blk @intCast(u28, line_delta); }; dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ diff --git a/src/main.zig b/src/main.zig index 867aa348b1..13bea13a5e 100644 --- a/src/main.zig +++ b/src/main.zig @@ -221,7 +221,6 @@ const usage_build_generic = \\ \\Supported file types: \\ .zig Zig source code - \\ .zir Zig Intermediate Representation code \\ .o ELF object file \\ .o MACH-O (macOS) object file \\ .obj COFF (Windows) object file @@ -245,8 +244,6 @@ const usage_build_generic = \\ -fno-emit-bin Do not output machine code \\ -femit-asm[=path] Output .s (assembly code) \\ -fno-emit-asm (default) Do not output .s (assembly code) - \\ -femit-zir[=path] Produce a .zir file with Zig IR - \\ -fno-emit-zir (default) Do not produce a .zir file with Zig IR \\ -femit-llvm-ir[=path] Produce a .ll file with LLVM IR (requires LLVM extensions) \\ -fno-emit-llvm-ir (default) Do not produce a .ll file with LLVM IR \\ -femit-h[=path] Generate a C header file (.h) @@ -1631,18 +1628,12 @@ fn buildOutputType( var emit_docs_resolved = try emit_docs.resolve("docs"); defer emit_docs_resolved.deinit(); - const zir_out_path: ?[]const u8 = switch (emit_zir) { - .no => null, - .yes_default_path => blk: { - if (root_src_file) |rsf| { - if (mem.endsWith(u8, rsf, ".zir")) { - break :blk try std.fmt.allocPrint(arena, "{s}.out.zir", .{root_name}); - } - } - break :blk try std.fmt.allocPrint(arena, "{s}.zir", .{root_name}); + switch (emit_zir) { + .no => {}, + .yes_default_path, .yes => { + fatal("The -femit-zir implementation has been intentionally deleted so that it can be rewritten as a proper backend.", .{}); }, - .yes => |p| p, - }; + } const root_pkg: ?*Package = if (root_src_file) |src_path| blk: { if (main_pkg_path) |p| { @@ -1753,7 +1744,7 @@ fn buildOutputType( .dll_export_fns = dll_export_fns, .object_format = object_format, .optimize_mode = optimize_mode, - .keep_source_files_loaded = zir_out_path != null, + .keep_source_files_loaded = false, .clang_argv = clang_argv.items, .lld_argv = lld_argv.items, .lib_dirs = lib_dirs.items, @@ -1845,7 +1836,7 @@ fn buildOutputType( } }; - updateModule(gpa, comp, zir_out_path, hook) catch |err| switch (err) { + updateModule(gpa, comp, hook) catch |err| switch (err) { error.SemanticAnalyzeFail => if (!watch) process.exit(1), else => |e| return e, }; @@ -1980,7 +1971,7 @@ fn buildOutputType( if (output_mode == .Exe) { try comp.makeBinFileWritable(); } - updateModule(gpa, comp, zir_out_path, hook) catch |err| switch (err) { + 
updateModule(gpa, comp, hook) catch |err| switch (err) {
                error.SemanticAnalyzeFail => continue,
                else => |e| return e,
            };
@@ -2003,7 +1994,7 @@ const AfterUpdateHook = union(enum) {
     update: []const u8,
 };
 
-fn updateModule(gpa: *Allocator, comp: *Compilation, zir_out_path: ?[]const u8, hook: AfterUpdateHook) !void {
+fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !void {
     try comp.update();
 
     var errors = try comp.getAllErrorsAlloc();
@@ -2013,6 +2004,10 @@ fn updateModule(gpa: *Allocator, comp: *Compilation, zir_out_path: ?[]const u8,
         for (errors.list) |full_err_msg| {
             full_err_msg.renderToStdErr();
         }
+        const log_text = comp.getCompileLogOutput();
+        if (log_text.len != 0) {
+            std.debug.print("\nCompile Log Output:\n{s}", .{log_text});
+        }
         return error.SemanticAnalyzeFail;
     } else switch (hook) {
         .none => {},
@@ -2024,20 +2019,6 @@ fn updateModule(gpa: *Allocator, comp: *Compilation, zir_out_path: ?[]const u8,
             .{},
         ),
     }
-
-    if (zir_out_path) |zop| {
-        const module = comp.bin_file.options.module orelse
-            fatal("-femit-zir with no zig source code", .{});
-        var new_zir_module = try zir.emit(gpa, module);
-        defer new_zir_module.deinit(gpa);
-
-        const baf = try io.BufferedAtomicFile.create(gpa, fs.cwd(), zop, .{});
-        defer baf.destroy();
-
-        try new_zir_module.writeToStream(gpa, baf.writer());
-
-        try baf.finish();
-    }
 }
 
 fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !void {
@@ -2506,7 +2487,7 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v
     };
     defer comp.destroy();
 
-    try updateModule(gpa, comp, null, .none);
+    try updateModule(gpa, comp, .none);
 
     try comp.makeBinFileExecutable();
 
     child_argv.items[argv_index_exe] = try comp.bin_file.options.emit.?.directory.join(
diff --git a/src/test.zig b/src/test.zig
index 59927525df..1c9fb57f01 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -15,6 +15,8 @@ const CrossTarget = std.zig.CrossTarget;
 
 const zig_h = link.File.C.zig_h;
 
+const hr = "=" ** 40;
+
 test "self-hosted" {
     var ctx = TestContext.init();
     defer ctx.deinit();
@@ -29,23 +31,32 @@ const ErrorMsg = union(enum) {
         msg: []const u8,
         line: u32,
         column: u32,
+        kind: Kind,
     },
     plain: struct {
         msg: []const u8,
+        kind: Kind,
     },
 
-    fn init(other: Compilation.AllErrors.Message) ErrorMsg {
+    const Kind = enum {
+        @"error",
+        note,
+    };
+
+    fn init(other: Compilation.AllErrors.Message, kind: Kind) ErrorMsg {
         switch (other) {
             .src => |src| return .{
                 .src = .{
                     .msg = src.msg,
                     .line = @intCast(u32, src.line),
                     .column = @intCast(u32, src.column),
+                    .kind = kind,
                 },
             },
             .plain => |plain| return .{
                 .plain = .{
                     .msg = plain.msg,
+                    .kind = kind,
                 },
             },
         }
     }
@@ -59,14 +70,15 @@
     ) !void {
         switch (self) {
             .src => |src| {
-                return writer.print(":{d}:{d}: error: {s}", .{
+                return writer.print(":{d}:{d}: {s}: {s}", .{
                     src.line + 1,
                     src.column + 1,
+                    @tagName(src.kind),
                     src.msg,
                 });
             },
             .plain => |plain| {
-                return writer.print("error: {s}", .{plain.msg});
+                return writer.print("{s}: {s}", .{ @tagName(plain.kind), plain.msg });
             },
         }
     }
@@ -86,9 +98,6 @@ pub const TestContext = struct {
     /// effects of the incremental compilation.
     src: [:0]const u8,
     case: union(enum) {
-        /// A transformation update transforms the input and tests against
-        /// the expected output ZIR.
-        Transformation: [:0]const u8,
         /// Check the main binary output file against an expected set of bytes.
         /// This is most useful with, for example, `-ofmt=c`.
CompareObjectFile: []const u8, @@ -139,15 +148,6 @@ pub const TestContext = struct { files: std.ArrayList(File), - /// Adds a subcase in which the module is updated with `src`, and the - /// resulting ZIR is validated against `result`. - pub fn addTransform(self: *Case, src: [:0]const u8, result: [:0]const u8) void { - self.updates.append(.{ - .src = src, - .case = .{ .Transformation = result }, - }) catch unreachable; - } - /// Adds a subcase in which the module is updated with `src`, and a C /// header is generated. pub fn addHeader(self: *Case, src: [:0]const u8, result: [:0]const u8) void { @@ -182,31 +182,37 @@ pub const TestContext = struct { /// the form `:line:column: error: message`. pub fn addError(self: *Case, src: [:0]const u8, errors: []const []const u8) void { var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch unreachable; - for (errors) |e, i| { - if (e[0] != ':') { - array[i] = .{ .plain = .{ .msg = e } }; + for (errors) |err_msg_line, i| { + if (std.mem.startsWith(u8, err_msg_line, "error: ")) { + array[i] = .{ + .plain = .{ .msg = err_msg_line["error: ".len..], .kind = .@"error" }, + }; + continue; + } else if (std.mem.startsWith(u8, err_msg_line, "note: ")) { + array[i] = .{ + .plain = .{ .msg = err_msg_line["note: ".len..], .kind = .note }, + }; continue; } - var cur = e[1..]; - var line_index = std.mem.indexOf(u8, cur, ":"); - if (line_index == null) { - @panic("Invalid test: error must be specified as follows:\n:line:column: error: message\n=========\n"); - } - const line = std.fmt.parseInt(u32, cur[0..line_index.?], 10) catch @panic("Unable to parse line number"); - cur = cur[line_index.? + 1 ..]; - const column_index = std.mem.indexOf(u8, cur, ":"); - if (column_index == null) { - @panic("Invalid test: error must be specified as follows:\n:line:column: error: message\n=========\n"); - } - const column = std.fmt.parseInt(u32, cur[0..column_index.?], 10) catch @panic("Unable to parse column number"); - cur = cur[column_index.? 
+ 2 ..]; - if (!std.mem.eql(u8, cur[0..7], "error: ")) { - @panic("Invalid test: error must be specified as follows:\n:line:column: error: message\n=========\n"); - } - const msg = cur[7..]; + // example: ":1:2: error: bad thing happened" + var it = std.mem.split(err_msg_line, ":"); + _ = it.next() orelse @panic("missing colon"); + const line_text = it.next() orelse @panic("missing line"); + const col_text = it.next() orelse @panic("missing column"); + const kind_text = it.next() orelse @panic("missing 'error'/'note'"); + const msg = it.rest()[1..]; // skip over the space at end of "error: " + + const line = std.fmt.parseInt(u32, line_text, 10) catch @panic("bad line number"); + const column = std.fmt.parseInt(u32, col_text, 10) catch @panic("bad column number"); + const kind: ErrorMsg.Kind = if (std.mem.eql(u8, kind_text, " error")) + .@"error" + else if (std.mem.eql(u8, kind_text, " note")) + .note + else + @panic("expected 'error'/'note'"); if (line == 0 or column == 0) { - @panic("Invalid test: error line and column must be specified starting at one!"); + @panic("line and column must be specified starting at one"); } array[i] = .{ @@ -214,6 +220,7 @@ pub const TestContext = struct { .msg = msg, .line = line - 1, .column = column - 1, + .kind = kind, }, }; } @@ -689,25 +696,20 @@ pub const TestContext = struct { var all_errors = try comp.getAllErrorsAlloc(); defer all_errors.deinit(allocator); if (all_errors.list.len != 0) { - std.debug.print("\nErrors occurred updating the compilation:\n================\n", .{}); + std.debug.print("\nErrors occurred updating the compilation:\n{s}\n", .{hr}); for (all_errors.list) |err_msg| { switch (err_msg) { .src => |src| { - std.debug.print(":{d}:{d}: error: {s}\n================\n", .{ - src.line + 1, src.column + 1, src.msg, + std.debug.print(":{d}:{d}: error: {s}\n{s}\n", .{ + src.line + 1, src.column + 1, src.msg, hr, }); }, .plain => |plain| { - std.debug.print("error: {s}\n================\n", .{plain.msg}); + std.debug.print("error: {s}\n{s}\n", .{ plain.msg, hr }); }, } } // TODO print generated C code - //if (comp.bin_file.cast(link.File.C)) |c_file| { - // std.debug.print("Generated C: \n===============\n{s}\n\n===========\n\n", .{ - // c_file.main.items, - // }); - //} std.debug.print("Test failed.\n", .{}); std.process.exit(1); } @@ -728,48 +730,74 @@ pub const TestContext = struct { std.testing.expectEqualStrings(expected_output, out); }, - .Transformation => |expected_output| { - update_node.setEstimatedTotalItems(5); - var emit_node = update_node.start("emit", 0); - emit_node.activate(); - var new_zir_module = try zir.emit(allocator, comp.bin_file.options.module.?); - defer new_zir_module.deinit(allocator); - emit_node.end(); - - var write_node = update_node.start("write", 0); - write_node.activate(); - var out_zir = std.ArrayList(u8).init(allocator); - defer out_zir.deinit(); - try new_zir_module.writeToStream(allocator, out_zir.writer()); - write_node.end(); - + .Error => |case_error_list| { var test_node = update_node.start("assert", 0); test_node.activate(); defer test_node.end(); - std.testing.expectEqualStrings(expected_output, out_zir.items); - }, - .Error => |e| { - var test_node = update_node.start("assert", 0); - test_node.activate(); - defer test_node.end(); - var handled_errors = try arena.alloc(bool, e.len); - for (handled_errors) |*handled| { - handled.* = false; + const handled_errors = try arena.alloc(bool, case_error_list.len); + std.mem.set(bool, handled_errors, false); + + var actual_errors = try 
comp.getAllErrorsAlloc(); + defer actual_errors.deinit(allocator); + + var any_failed = false; + var notes_to_check = std.ArrayList(*const Compilation.AllErrors.Message).init(allocator); + defer notes_to_check.deinit(); + + for (actual_errors.list) |actual_error| { + for (case_error_list) |case_msg, i| { + const ex_tag: @TagType(@TypeOf(case_msg)) = case_msg; + switch (actual_error) { + .src => |actual_msg| { + for (actual_msg.notes) |*note| { + try notes_to_check.append(note); + } + + if (ex_tag != .src) continue; + + if (actual_msg.line == case_msg.src.line and + actual_msg.column == case_msg.src.column and + std.mem.eql(u8, case_msg.src.msg, actual_msg.msg) and + case_msg.src.kind == .@"error") + { + handled_errors[i] = true; + break; + } + }, + .plain => |plain| { + if (ex_tag != .plain) continue; + + if (std.mem.eql(u8, case_msg.plain.msg, plain.msg) and + case_msg.plain.kind == .@"error") + { + handled_errors[i] = true; + break; + } + }, + } + } else { + std.debug.print( + "\nUnexpected error:\n{s}\n{}\n{s}", + .{ hr, ErrorMsg.init(actual_error, .@"error"), hr }, + ); + any_failed = true; + } } - var all_errors = try comp.getAllErrorsAlloc(); - defer all_errors.deinit(allocator); - for (all_errors.list) |a| { - for (e) |ex, i| { - const a_tag: @TagType(@TypeOf(a)) = a; - const ex_tag: @TagType(@TypeOf(ex)) = ex; - switch (a) { - .src => |src| { + while (notes_to_check.popOrNull()) |note| { + for (case_error_list) |case_msg, i| { + const ex_tag: @TagType(@TypeOf(case_msg)) = case_msg; + switch (note.*) { + .src => |actual_msg| { + for (actual_msg.notes) |*sub_note| { + try notes_to_check.append(sub_note); + } if (ex_tag != .src) continue; - if (src.line == ex.src.line and - src.column == ex.src.column and - std.mem.eql(u8, ex.src.msg, src.msg)) + if (actual_msg.line == case_msg.src.line and + actual_msg.column == case_msg.src.column and + std.mem.eql(u8, case_msg.src.msg, actual_msg.msg) and + case_msg.src.kind == .note) { handled_errors[i] = true; break; @@ -778,7 +806,9 @@ pub const TestContext = struct { .plain => |plain| { if (ex_tag != .plain) continue; - if (std.mem.eql(u8, ex.plain.msg, plain.msg)) { + if (std.mem.eql(u8, case_msg.plain.msg, plain.msg) and + case_msg.plain.kind == .note) + { handled_errors[i] = true; break; } @@ -786,23 +816,29 @@ pub const TestContext = struct { } } else { std.debug.print( - "{s}\nUnexpected error:\n================\n{}\n================\nTest failed.\n", - .{ case.name, ErrorMsg.init(a) }, + "\nUnexpected note:\n{s}\n{}\n{s}", + .{ hr, ErrorMsg.init(note.*, .note), hr }, ); - std.process.exit(1); + any_failed = true; } } for (handled_errors) |handled, i| { if (!handled) { - const er = e[i]; std.debug.print( - "{s}\nDid not receive error:\n================\n{}\n================\nTest failed.\n", - .{ case.name, er }, + "\nExpected error not found:\n{s}\n{}\n{s}", + .{ hr, case_error_list[i], hr }, ); - std.process.exit(1); + any_failed = true; } } + + if (any_failed) { + std.debug.print("\nTest case '{s}' failed, update_index={d}.\n", .{ + case.name, update_index, + }); + std.process.exit(1); + } }, .Execution => |expected_stdout| { update_node.setEstimatedTotalItems(4); diff --git a/src/type/Enum.zig b/src/type/Enum.zig index 9b9ec5b319..4dfd5f6e44 100644 --- a/src/type/Enum.zig +++ b/src/type/Enum.zig @@ -21,7 +21,7 @@ pub const Field = struct { }; pub const Zir = struct { - body: zir.Module.Body, + body: zir.Body, inst: *zir.Inst, }; diff --git a/src/type/Struct.zig b/src/type/Struct.zig index d6a591c95e..24e3a0dcad 100644 --- 
a/src/type/Struct.zig +++ b/src/type/Struct.zig @@ -24,7 +24,7 @@ pub const Field = struct { }; pub const Zir = struct { - body: zir.Module.Body, + body: zir.Body, inst: *zir.Inst, }; diff --git a/src/type/Union.zig b/src/type/Union.zig index 26cc1796c6..5c7acf7d36 100644 --- a/src/type/Union.zig +++ b/src/type/Union.zig @@ -24,7 +24,7 @@ pub const Field = struct { }; pub const Zir = struct { - body: zir.Module.Body, + body: zir.Body, inst: *zir.Inst, }; diff --git a/src/zir.zig b/src/zir.zig index 0ccb09efac..0e7b3a3520 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -12,17 +12,6 @@ const TypedValue = @import("TypedValue.zig"); const ir = @import("ir.zig"); const IrModule = @import("Module.zig"); -/// This struct is relevent only for the ZIR Module text format. It is not used for -/// semantic analysis of Zig source code. -pub const Decl = struct { - name: []const u8, - - /// Hash of slice into the source of the part after the = and before the next instruction. - contents_hash: std.zig.SrcHash, - - inst: *Inst, -}; - /// These are instructions that correspond to the ZIR text format. See `ir.Inst` for /// in-memory, analyzed instructions with types and values. /// We use a table to map these instruction to their respective semantically analyzed @@ -141,15 +130,12 @@ pub const Inst = struct { container_field, /// Declares the beginning of a statement. Used for debug info. dbg_stmt, - /// Represents a pointer to a global decl by name. + /// Represents a pointer to a global decl. declref, /// Represents a pointer to a global decl by string name. declref_str, - /// The syntax `@foo` is equivalent to `declval("foo")`. - /// declval is equivalent to declref followed by deref. + /// Equivalent to a declref followed by deref. declval, - /// Same as declval but the parameter is a `*Module.Decl` rather than a name. - declval_in_module, /// Load the value from a pointer. deref, /// Arithmetic division. Asserts no integer overflow. 
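
As the following hunks show, `declval` now carries a resolved `*IrModule.Decl` directly, absorbing the old `declval_in_module` variant. A minimal sketch of building the instruction under the new layout, modeled on the allocation pattern used elsewhere in zir.zig; `makeDeclVal` and its parameters are hypothetical, and this code is not part of the patch:

    fn makeDeclVal(arena: *std.mem.Allocator, src: usize, decl: *IrModule.Decl) !*Inst {
        const declval = try arena.create(Inst.DeclVal);
        declval.* = .{
            .base = .{ .src = src, .tag = Inst.DeclVal.base_tag },
            // The positional is now a resolved decl pointer, not a name
            // string to be looked up during semantic analysis.
            .positionals = .{ .decl = decl },
            .kw_args = .{},
        };
        return &declval.base;
    }
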
@@ -419,7 +405,6 @@ pub const Inst = struct { .declref => DeclRef, .declref_str => DeclRefStr, .declval => DeclVal, - .declval_in_module => DeclValInModule, .coerce_result_block_ptr => CoerceResultBlockPtr, .compilelog => CompileLog, .loop => Loop, @@ -496,7 +481,6 @@ pub const Inst = struct { .declref, .declref_str, .declval, - .declval_in_module, .deref, .div, .elemptr, @@ -650,7 +634,7 @@ pub const Inst = struct { base: Inst, positionals: struct { - body: Module.Body, + body: Body, }, kw_args: struct {}, }; @@ -705,7 +689,7 @@ pub const Inst = struct { base: Inst, positionals: struct { - name: []const u8, + decl: *IrModule.Decl, }, kw_args: struct {}, }; @@ -724,16 +708,6 @@ pub const Inst = struct { pub const base_tag = Tag.declval; base: Inst, - positionals: struct { - name: []const u8, - }, - kw_args: struct {}, - }; - - pub const DeclValInModule = struct { - pub const base_tag = Tag.declval_in_module; - base: Inst, - positionals: struct { decl: *IrModule.Decl, }, @@ -758,10 +732,7 @@ pub const Inst = struct { positionals: struct { to_log: []*Inst, }, - kw_args: struct { - /// If we have seen it already so don't make another error - seen: bool = false, - }, + kw_args: struct {}, }; pub const Const = struct { @@ -799,7 +770,7 @@ pub const Inst = struct { base: Inst, positionals: struct { - body: Module.Body, + body: Body, }, kw_args: struct {}, }; @@ -838,7 +809,7 @@ pub const Inst = struct { positionals: struct { fn_type: *Inst, - body: Module.Body, + body: Body, }, kw_args: struct { is_inline: bool = false, @@ -998,8 +969,8 @@ pub const Inst = struct { positionals: struct { condition: *Inst, - then_body: Module.Body, - else_body: Module.Body, + then_body: Body, + else_body: Body, }, kw_args: struct {}, }; @@ -1078,7 +1049,7 @@ pub const Inst = struct { /// List of all individual items and ranges items: []*Inst, cases: []Case, - else_body: Module.Body, + else_body: Body, }, kw_args: struct { /// Pointer to first range if such exists. @@ -1092,7 +1063,7 @@ pub const Inst = struct { pub const Case = struct { item: *Inst, - body: Module.Body, + body: Body, }; }; pub const TypeOfPeer = struct { @@ -1192,6 +1163,10 @@ pub const ErrorMsg = struct { msg: []const u8, }; +pub const Body = struct { + instructions: []*Inst, +}; + pub const Module = struct { decls: []*Decl, arena: std.heap.ArenaAllocator, @@ -1199,6 +1174,15 @@ pub const Module = struct { metadata: std.AutoHashMap(*Inst, MetaData), body_metadata: std.AutoHashMap(*Body, BodyMetaData), + pub const Decl = struct { + name: []const u8, + + /// Hash of slice into the source of the part after the = and before the next instruction. 
+ contents_hash: std.zig.SrcHash, + + inst: *Inst, + }; + pub const MetaData = struct { deaths: ir.Inst.DeathsInt, addr: usize, @@ -1208,10 +1192,6 @@ pub const Module = struct { deaths: []*Inst, }; - pub const Body = struct { - instructions: []*Inst, - }; - pub fn deinit(self: *Module, allocator: *Allocator) void { self.metadata.deinit(); self.body_metadata.deinit(); @@ -1369,7 +1349,7 @@ const Writer = struct { } try stream.writeByte(']'); }, - Module.Body => { + Body => { try stream.writeAll("{\n"); if (self.module.body_metadata.get(param_ptr)) |metadata| { if (metadata.deaths.len > 0) { @@ -1468,8 +1448,6 @@ const Writer = struct { try stream.print("@{s}", .{info.name}); } } else if (inst.cast(Inst.DeclVal)) |decl_val| { - try stream.print("@{s}", .{decl_val.positionals.name}); - } else if (inst.cast(Inst.DeclValInModule)) |decl_val| { try stream.print("@{s}", .{decl_val.positionals.decl.name}); } else { // This should be unreachable in theory, but since ZIR is used for debugging the compiler @@ -1479,502 +1457,6 @@ const Writer = struct { } }; -pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module { - var global_name_map = std.StringHashMap(*Inst).init(allocator); - defer global_name_map.deinit(); - - var parser: Parser = .{ - .allocator = allocator, - .arena = std.heap.ArenaAllocator.init(allocator), - .i = 0, - .source = source, - .global_name_map = &global_name_map, - .decls = .{}, - .unnamed_index = 0, - .block_table = std.StringHashMap(*Inst.Block).init(allocator), - .loop_table = std.StringHashMap(*Inst.Loop).init(allocator), - }; - defer parser.block_table.deinit(); - defer parser.loop_table.deinit(); - errdefer parser.arena.deinit(); - - parser.parseRoot() catch |err| switch (err) { - error.ParseFailure => { - assert(parser.error_msg != null); - }, - else => |e| return e, - }; - - return Module{ - .decls = parser.decls.toOwnedSlice(allocator), - .arena = parser.arena, - .error_msg = parser.error_msg, - .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator), - .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator), - }; -} - -const Parser = struct { - allocator: *Allocator, - arena: std.heap.ArenaAllocator, - i: usize, - source: [:0]const u8, - decls: std.ArrayListUnmanaged(*Decl), - global_name_map: *std.StringHashMap(*Inst), - error_msg: ?ErrorMsg = null, - unnamed_index: usize, - block_table: std.StringHashMap(*Inst.Block), - loop_table: std.StringHashMap(*Inst.Loop), - - const Body = struct { - instructions: std.ArrayList(*Inst), - name_map: *std.StringHashMap(*Inst), - }; - - fn parseBody(self: *Parser, body_ctx: ?*Body) !Module.Body { - var name_map = std.StringHashMap(*Inst).init(self.allocator); - defer name_map.deinit(); - - var body_context = Body{ - .instructions = std.ArrayList(*Inst).init(self.allocator), - .name_map = if (body_ctx) |bctx| bctx.name_map else &name_map, - }; - defer body_context.instructions.deinit(); - - try requireEatBytes(self, "{"); - skipSpace(self); - - while (true) : (self.i += 1) switch (self.source[self.i]) { - ';' => _ = try skipToAndOver(self, '\n'), - '%' => { - self.i += 1; - const ident = try skipToAndOver(self, ' '); - skipSpace(self); - try requireEatBytes(self, "="); - skipSpace(self); - const decl = try parseInstruction(self, &body_context, ident); - const ident_index = body_context.instructions.items.len; - if (try body_context.name_map.fetchPut(ident, decl.inst)) |_| { - return self.fail("redefinition of identifier '{s}'", .{ident}); - } - try 
body_context.instructions.append(decl.inst); - continue; - }, - ' ', '\n' => continue, - '}' => { - self.i += 1; - break; - }, - else => |byte| return self.failByte(byte), - }; - - // Move the instructions to the arena - const instrs = try self.arena.allocator.alloc(*Inst, body_context.instructions.items.len); - mem.copy(*Inst, instrs, body_context.instructions.items); - return Module.Body{ .instructions = instrs }; - } - - fn parseStringLiteral(self: *Parser) ![]u8 { - const start = self.i; - try self.requireEatBytes("\""); - - while (true) : (self.i += 1) switch (self.source[self.i]) { - '"' => { - self.i += 1; - const span = self.source[start..self.i]; - var bad_index: usize = undefined; - const parsed = std.zig.parseStringLiteral(&self.arena.allocator, span, &bad_index) catch |err| switch (err) { - error.InvalidCharacter => { - self.i = start + bad_index; - const bad_byte = self.source[self.i]; - return self.fail("invalid string literal character: '{c}'\n", .{bad_byte}); - }, - else => |e| return e, - }; - return parsed; - }, - '\\' => { - self.i += 1; - continue; - }, - 0 => return self.failByte(0), - else => continue, - }; - } - - fn parseIntegerLiteral(self: *Parser) !BigIntConst { - const start = self.i; - if (self.source[self.i] == '-') self.i += 1; - while (true) : (self.i += 1) switch (self.source[self.i]) { - '0'...'9' => continue, - else => break, - }; - const number_text = self.source[start..self.i]; - const base = 10; - // TODO reuse the same array list for this - const limbs_buffer_len = std.math.big.int.calcSetStringLimbsBufferLen(base, number_text.len); - const limbs_buffer = try self.allocator.alloc(std.math.big.Limb, limbs_buffer_len); - defer self.allocator.free(limbs_buffer); - const limb_len = std.math.big.int.calcSetStringLimbCount(base, number_text.len); - const limbs = try self.arena.allocator.alloc(std.math.big.Limb, limb_len); - var result = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result.setString(base, number_text, limbs_buffer, self.allocator) catch |err| switch (err) { - error.InvalidCharacter => { - self.i = start; - return self.fail("invalid digit in integer literal", .{}); - }, - }; - return result.toConst(); - } - - fn parseRoot(self: *Parser) !void { - // The IR format is designed so that it can be tokenized and parsed at the same time. 
- while (true) { - switch (self.source[self.i]) { - ';' => _ = try skipToAndOver(self, '\n'), - '@' => { - self.i += 1; - const ident = try skipToAndOver(self, ' '); - skipSpace(self); - try requireEatBytes(self, "="); - skipSpace(self); - const decl = try parseInstruction(self, null, ident); - const ident_index = self.decls.items.len; - if (try self.global_name_map.fetchPut(ident, decl.inst)) |_| { - return self.fail("redefinition of identifier '{s}'", .{ident}); - } - try self.decls.append(self.allocator, decl); - }, - ' ', '\n' => self.i += 1, - 0 => break, - else => |byte| return self.fail("unexpected byte: '{c}'", .{byte}), - } - } - } - - fn eatByte(self: *Parser, byte: u8) bool { - if (self.source[self.i] != byte) return false; - self.i += 1; - return true; - } - - fn skipSpace(self: *Parser) void { - while (self.source[self.i] == ' ' or self.source[self.i] == '\n') { - self.i += 1; - } - } - - fn requireEatBytes(self: *Parser, bytes: []const u8) !void { - const start = self.i; - for (bytes) |byte| { - if (self.source[self.i] != byte) { - self.i = start; - return self.fail("expected '{s}'", .{bytes}); - } - self.i += 1; - } - } - - fn skipToAndOver(self: *Parser, byte: u8) ![]const u8 { - const start_i = self.i; - while (self.source[self.i] != 0) : (self.i += 1) { - if (self.source[self.i] == byte) { - const result = self.source[start_i..self.i]; - self.i += 1; - return result; - } - } - return self.fail("unexpected EOF", .{}); - } - - /// ParseFailure is an internal error code; handled in `parse`. - const InnerError = error{ ParseFailure, OutOfMemory }; - - fn failByte(self: *Parser, byte: u8) InnerError { - if (byte == 0) { - return self.fail("unexpected EOF", .{}); - } else { - return self.fail("unexpected byte: '{c}'", .{byte}); - } - } - - fn fail(self: *Parser, comptime format: []const u8, args: anytype) InnerError { - @setCold(true); - self.error_msg = ErrorMsg{ - .byte_offset = self.i, - .msg = try std.fmt.allocPrint(&self.arena.allocator, format, args), - }; - return error.ParseFailure; - } - - fn parseInstruction(self: *Parser, body_ctx: ?*Body, name: []const u8) InnerError!*Decl { - const contents_start = self.i; - const fn_name = try skipToAndOver(self, '('); - inline for (@typeInfo(Inst.Tag).Enum.fields) |field| { - if (mem.eql(u8, field.name, fn_name)) { - const tag = @field(Inst.Tag, field.name); - return parseInstructionGeneric(self, field.name, tag.Type(), tag, body_ctx, name, contents_start); - } - } - return self.fail("unknown instruction '{s}'", .{fn_name}); - } - - fn parseInstructionGeneric( - self: *Parser, - comptime fn_name: []const u8, - comptime InstType: type, - tag: Inst.Tag, - body_ctx: ?*Body, - inst_name: []const u8, - contents_start: usize, - ) InnerError!*Decl { - const inst_specific = try self.arena.allocator.create(InstType); - inst_specific.base = .{ - .src = self.i, - .tag = tag, - }; - - if (InstType == Inst.Block) { - try self.block_table.put(inst_name, inst_specific); - } else if (InstType == Inst.Loop) { - try self.loop_table.put(inst_name, inst_specific); - } - - if (@hasField(InstType, "ty")) { - inst_specific.ty = opt_type orelse { - return self.fail("instruction '" ++ fn_name ++ "' requires type", .{}); - }; - } - - const Positionals = @TypeOf(inst_specific.positionals); - inline for (@typeInfo(Positionals).Struct.fields) |arg_field| { - if (self.source[self.i] == ',') { - self.i += 1; - skipSpace(self); - } else if (self.source[self.i] == ')') { - return self.fail("expected positional parameter '{s}'", .{arg_field.name}); - } - 
@field(inst_specific.positionals, arg_field.name) = try parseParameterGeneric( - self, - arg_field.field_type, - body_ctx, - ); - skipSpace(self); - } - - const KW_Args = @TypeOf(inst_specific.kw_args); - inst_specific.kw_args = .{}; // assign defaults - skipSpace(self); - while (eatByte(self, ',')) { - skipSpace(self); - const name = try skipToAndOver(self, '='); - inline for (@typeInfo(KW_Args).Struct.fields) |arg_field| { - const field_name = arg_field.name; - if (mem.eql(u8, name, field_name)) { - const NonOptional = switch (@typeInfo(arg_field.field_type)) { - .Optional => |info| info.child, - else => arg_field.field_type, - }; - @field(inst_specific.kw_args, field_name) = try parseParameterGeneric(self, NonOptional, body_ctx); - break; - } - } else { - return self.fail("unrecognized keyword parameter: '{s}'", .{name}); - } - skipSpace(self); - } - try requireEatBytes(self, ")"); - - const decl = try self.arena.allocator.create(Decl); - decl.* = .{ - .name = inst_name, - .contents_hash = std.zig.hashSrc(self.source[contents_start..self.i]), - .inst = &inst_specific.base, - }; - - return decl; - } - - fn parseParameterGeneric(self: *Parser, comptime T: type, body_ctx: ?*Body) !T { - if (@typeInfo(T) == .Enum) { - const start = self.i; - while (true) : (self.i += 1) switch (self.source[self.i]) { - ' ', '\n', ',', ')' => { - const enum_name = self.source[start..self.i]; - return std.meta.stringToEnum(T, enum_name) orelse { - return self.fail("tag '{s}' not a member of enum '{s}'", .{ enum_name, @typeName(T) }); - }; - }, - 0 => return self.failByte(0), - else => continue, - }; - } - switch (T) { - Module.Body => return parseBody(self, body_ctx), - bool => { - const bool_value = switch (self.source[self.i]) { - '0' => false, - '1' => true, - else => |byte| return self.fail("expected '0' or '1' for boolean value, found {c}", .{byte}), - }; - self.i += 1; - return bool_value; - }, - []*Inst => { - try requireEatBytes(self, "["); - skipSpace(self); - if (eatByte(self, ']')) return &[0]*Inst{}; - - var instructions = std.ArrayList(*Inst).init(&self.arena.allocator); - while (true) { - skipSpace(self); - try instructions.append(try parseParameterInst(self, body_ctx)); - skipSpace(self); - if (!eatByte(self, ',')) break; - } - try requireEatBytes(self, "]"); - return instructions.toOwnedSlice(); - }, - *Inst => return parseParameterInst(self, body_ctx), - []u8, []const u8 => return self.parseStringLiteral(), - BigIntConst => return self.parseIntegerLiteral(), - usize => { - const big_int = try self.parseIntegerLiteral(); - return big_int.to(usize) catch |err| return self.fail("integer literal: {s}", .{@errorName(err)}); - }, - TypedValue => return self.fail("'const' is a special instruction; not legal in ZIR text", .{}), - *IrModule.Decl => return self.fail("'declval_in_module' is a special instruction; not legal in ZIR text", .{}), - *Inst.Block => { - const name = try self.parseStringLiteral(); - return self.block_table.get(name).?; - }, - *Inst.Loop => { - const name = try self.parseStringLiteral(); - return self.loop_table.get(name).?; - }, - [][]const u8 => { - try requireEatBytes(self, "["); - skipSpace(self); - if (eatByte(self, ']')) return &[0][]const u8{}; - - var strings = std.ArrayList([]const u8).init(&self.arena.allocator); - while (true) { - skipSpace(self); - try strings.append(try self.parseStringLiteral()); - skipSpace(self); - if (!eatByte(self, ',')) break; - } - try requireEatBytes(self, "]"); - return strings.toOwnedSlice(); - }, - []Inst.SwitchBr.Case => { - try 
requireEatBytes(self, "{"); - skipSpace(self); - if (eatByte(self, '}')) return &[0]Inst.SwitchBr.Case{}; - - var cases = std.ArrayList(Inst.SwitchBr.Case).init(&self.arena.allocator); - while (true) { - const cur = try cases.addOne(); - skipSpace(self); - cur.item = try self.parseParameterGeneric(*Inst, body_ctx); - skipSpace(self); - try requireEatBytes(self, "=>"); - cur.body = try self.parseBody(body_ctx); - skipSpace(self); - if (!eatByte(self, ',')) break; - } - skipSpace(self); - try requireEatBytes(self, "}"); - return cases.toOwnedSlice(); - }, - else => @compileError("Unimplemented: ir parseParameterGeneric for type " ++ @typeName(T)), - } - return self.fail("TODO parse parameter {s}", .{@typeName(T)}); - } - - fn parseParameterInst(self: *Parser, body_ctx: ?*Body) !*Inst { - const local_ref = switch (self.source[self.i]) { - '@' => false, - '%' => true, - else => |byte| return self.fail("unexpected byte: '{c}'", .{byte}), - }; - const map = if (local_ref) - if (body_ctx) |bc| - bc.name_map - else - return self.fail("referencing a % instruction in global scope", .{}) - else - self.global_name_map; - - self.i += 1; - const name_start = self.i; - while (true) : (self.i += 1) switch (self.source[self.i]) { - 0, ' ', '\n', ',', ')', ']' => break, - else => continue, - }; - const ident = self.source[name_start..self.i]; - return map.get(ident) orelse { - const bad_name = self.source[name_start - 1 .. self.i]; - const src = name_start - 1; - if (local_ref) { - self.i = src; - return self.fail("unrecognized identifier: {s}", .{bad_name}); - } else { - const declval = try self.arena.allocator.create(Inst.DeclVal); - declval.* = .{ - .base = .{ - .src = src, - .tag = Inst.DeclVal.base_tag, - }, - .positionals = .{ .name = ident }, - .kw_args = .{}, - }; - return &declval.base; - } - }; - } - - fn generateName(self: *Parser) ![]u8 { - const result = try std.fmt.allocPrint(&self.arena.allocator, "unnamed${d}", .{self.unnamed_index}); - self.unnamed_index += 1; - return result; - } -}; - -pub fn emit(allocator: *Allocator, old_module: *IrModule) !Module { - var ctx: EmitZIR = .{ - .allocator = allocator, - .decls = .{}, - .arena = std.heap.ArenaAllocator.init(allocator), - .old_module = old_module, - .next_auto_name = 0, - .names = std.StringArrayHashMap(void).init(allocator), - .primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator), - .indent = 0, - .block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator), - .loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator), - .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator), - .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator), - }; - errdefer ctx.metadata.deinit(); - errdefer ctx.body_metadata.deinit(); - defer ctx.block_table.deinit(); - defer ctx.loop_table.deinit(); - defer ctx.decls.deinit(allocator); - defer ctx.names.deinit(); - defer ctx.primitive_table.deinit(); - errdefer ctx.arena.deinit(); - - try ctx.emit(); - - return Module{ - .decls = ctx.decls.toOwnedSlice(allocator), - .arena = ctx.arena, - .metadata = ctx.metadata, - .body_metadata = ctx.body_metadata, - }; -} - /// For debugging purposes, prints a function representation to stderr. 
pub fn dumpFn(old_module: IrModule, module_fn: *IrModule.Fn) void { const allocator = old_module.gpa; @@ -2374,1052 +1856,14 @@ const DumpTzir = struct { } }; -const EmitZIR = struct { - allocator: *Allocator, - arena: std.heap.ArenaAllocator, - old_module: *const IrModule, - decls: std.ArrayListUnmanaged(*Decl), - names: std.StringArrayHashMap(void), - next_auto_name: usize, - primitive_table: std.AutoHashMap(Inst.Primitive.Builtin, *Decl), - indent: usize, - block_table: std.AutoHashMap(*ir.Inst.Block, *Inst.Block), - loop_table: std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop), - metadata: std.AutoHashMap(*Inst, Module.MetaData), - body_metadata: std.AutoHashMap(*Module.Body, Module.BodyMetaData), - - fn emit(self: *EmitZIR) !void { - // Put all the Decls in a list and sort them by name to avoid nondeterminism introduced - // by the hash table. - var src_decls = std.ArrayList(*IrModule.Decl).init(self.allocator); - defer src_decls.deinit(); - try src_decls.ensureCapacity(self.old_module.decl_table.items().len); - try self.decls.ensureCapacity(self.allocator, self.old_module.decl_table.items().len); - try self.names.ensureCapacity(self.old_module.decl_table.items().len); - - for (self.old_module.decl_table.items()) |entry| { - const decl = entry.value; - src_decls.appendAssumeCapacity(decl); - self.names.putAssumeCapacityNoClobber(mem.spanZ(decl.name), {}); - } - std.sort.sort(*IrModule.Decl, src_decls.items, {}, (struct { - fn lessThan(context: void, a: *IrModule.Decl, b: *IrModule.Decl) bool { - return a.src_index < b.src_index; - } - }).lessThan); - - // Emit all the decls. - for (src_decls.items) |ir_decl| { - switch (ir_decl.analysis) { - .unreferenced => continue, - - .complete => {}, - .codegen_failure => {}, // We still can emit the ZIR. - .codegen_failure_retryable => {}, // We still can emit the ZIR. 
- - .in_progress => unreachable, - .outdated => unreachable, - - .sema_failure, - .sema_failure_retryable, - .dependency_failure, - => if (self.old_module.failed_decls.get(ir_decl)) |err_msg| { - const fail_inst = try self.arena.allocator.create(Inst.UnOp); - fail_inst.* = .{ - .base = .{ - .src = ir_decl.src(), - .tag = .compileerror, - }, - .positionals = .{ - .operand = blk: { - const msg_str = try self.arena.allocator.dupe(u8, err_msg.msg); - - const str_inst = try self.arena.allocator.create(Inst.Str); - str_inst.* = .{ - .base = .{ - .src = ir_decl.src(), - .tag = Inst.Str.base_tag, - }, - .positionals = .{ - .bytes = err_msg.msg, - }, - .kw_args = .{}, - }; - break :blk &str_inst.base; - }, - }, - .kw_args = .{}, - }; - const decl = try self.arena.allocator.create(Decl); - decl.* = .{ - .name = mem.spanZ(ir_decl.name), - .contents_hash = undefined, - .inst = &fail_inst.base, - }; - try self.decls.append(self.allocator, decl); - continue; - }, - } - if (self.old_module.export_owners.get(ir_decl)) |exports| { - for (exports) |module_export| { - const symbol_name = try self.emitStringLiteral(module_export.src, module_export.options.name); - const export_inst = try self.arena.allocator.create(Inst.Export); - export_inst.* = .{ - .base = .{ - .src = module_export.src, - .tag = Inst.Export.base_tag, - }, - .positionals = .{ - .symbol_name = symbol_name.inst, - .decl_name = mem.spanZ(module_export.exported_decl.name), - }, - .kw_args = .{}, - }; - _ = try self.emitUnnamedDecl(&export_inst.base); - } - } - const new_decl = try self.emitTypedValue(ir_decl.src(), ir_decl.typed_value.most_recent.typed_value); - new_decl.name = try self.arena.allocator.dupe(u8, mem.spanZ(ir_decl.name)); - } - } - - const ZirBody = struct { - inst_table: *std.AutoHashMap(*ir.Inst, *Inst), - instructions: *std.ArrayList(*Inst), - }; - - fn resolveInst(self: *EmitZIR, new_body: ZirBody, inst: *ir.Inst) !*Inst { - if (inst.cast(ir.Inst.Constant)) |const_inst| { - const new_inst = if (const_inst.val.castTag(.function)) |func_pl| blk: { - const owner_decl = func_pl.data.owner_decl; - break :blk try self.emitDeclVal(inst.src, mem.spanZ(owner_decl.name)); - } else if (const_inst.val.castTag(.decl_ref)) |declref| blk: { - const decl_ref = try self.emitDeclRef(inst.src, declref.data); - try new_body.instructions.append(decl_ref); - break :blk decl_ref; - } else if (const_inst.val.castTag(.variable)) |var_pl| blk: { - const owner_decl = var_pl.data.owner_decl; - break :blk try self.emitDeclVal(inst.src, mem.spanZ(owner_decl.name)); - } else blk: { - break :blk (try self.emitTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val })).inst; - }; - _ = try new_body.inst_table.put(inst, new_inst); - return new_inst; - } else { - return new_body.inst_table.get(inst).?; - } - } - - fn emitDeclVal(self: *EmitZIR, src: usize, decl_name: []const u8) !*Inst { - const declval = try self.arena.allocator.create(Inst.DeclVal); - declval.* = .{ - .base = .{ - .src = src, - .tag = Inst.DeclVal.base_tag, - }, - .positionals = .{ .name = try self.arena.allocator.dupe(u8, decl_name) }, - .kw_args = .{}, - }; - return &declval.base; - } - - fn emitComptimeIntVal(self: *EmitZIR, src: usize, val: Value) !*Decl { - const big_int_space = try self.arena.allocator.create(Value.BigIntSpace); - const int_inst = try self.arena.allocator.create(Inst.Int); - int_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Int.base_tag, - }, - .positionals = .{ - .int = val.toBigInt(big_int_space), - }, - .kw_args = .{}, - }; - return 
self.emitUnnamedDecl(&int_inst.base); - } - - fn emitDeclRef(self: *EmitZIR, src: usize, module_decl: *IrModule.Decl) !*Inst { - const declref_inst = try self.arena.allocator.create(Inst.DeclRef); - declref_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.DeclRef.base_tag, - }, - .positionals = .{ - .name = mem.spanZ(module_decl.name), - }, - .kw_args = .{}, - }; - return &declref_inst.base; - } - - fn emitFn(self: *EmitZIR, module_fn: *IrModule.Fn, src: usize, ty: Type) Allocator.Error!*Decl { - var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator); - defer inst_table.deinit(); - - var instructions = std.ArrayList(*Inst).init(self.allocator); - defer instructions.deinit(); - - switch (module_fn.state) { - .queued => unreachable, - .in_progress => unreachable, - .inline_only => unreachable, - .success => { - try self.emitBody(module_fn.body, &inst_table, &instructions); - }, - .sema_failure => { - const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?; - const fail_inst = try self.arena.allocator.create(Inst.UnOp); - fail_inst.* = .{ - .base = .{ - .src = src, - .tag = .compileerror, - }, - .positionals = .{ - .operand = blk: { - const msg_str = try self.arena.allocator.dupe(u8, err_msg.msg); - - const str_inst = try self.arena.allocator.create(Inst.Str); - str_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Str.base_tag, - }, - .positionals = .{ - .bytes = msg_str, - }, - .kw_args = .{}, - }; - break :blk &str_inst.base; - }, - }, - .kw_args = .{}, - }; - try instructions.append(&fail_inst.base); - }, - .dependency_failure => { - const fail_inst = try self.arena.allocator.create(Inst.UnOp); - fail_inst.* = .{ - .base = .{ - .src = src, - .tag = .compileerror, - }, - .positionals = .{ - .operand = blk: { - const msg_str = try self.arena.allocator.dupe(u8, "depends on another failed Decl"); - - const str_inst = try self.arena.allocator.create(Inst.Str); - str_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Str.base_tag, - }, - .positionals = .{ - .bytes = msg_str, - }, - .kw_args = .{}, - }; - break :blk &str_inst.base; - }, - }, - .kw_args = .{}, - }; - try instructions.append(&fail_inst.base); - }, - } - - const fn_type = try self.emitType(src, ty); - - const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len); - mem.copy(*Inst, arena_instrs, instructions.items); - - const fn_inst = try self.arena.allocator.create(Inst.Fn); - fn_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Fn.base_tag, - }, - .positionals = .{ - .fn_type = fn_type.inst, - .body = .{ .instructions = arena_instrs }, - }, - .kw_args = .{ - .is_inline = module_fn.state == .inline_only, - }, - }; - return self.emitUnnamedDecl(&fn_inst.base); - } - - fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Decl { - const allocator = &self.arena.allocator; - if (typed_value.val.castTag(.decl_ref)) |decl_ref| { - const decl = decl_ref.data; - return try self.emitUnnamedDecl(try self.emitDeclRef(src, decl)); - } else if (typed_value.val.castTag(.variable)) |variable| { - return self.emitTypedValue(src, .{ - .ty = typed_value.ty, - .val = variable.data.init, - }); - } - if (typed_value.val.isUndef()) { - const as_inst = try self.arena.allocator.create(Inst.BinOp); - as_inst.* = .{ - .base = .{ - .tag = .as, - .src = src, - }, - .positionals = .{ - .lhs = (try self.emitType(src, typed_value.ty)).inst, - .rhs = (try self.emitPrimitive(src, .@"undefined")).inst, - }, - .kw_args = .{}, - }; - return 
self.emitUnnamedDecl(&as_inst.base); - } - switch (typed_value.ty.zigTypeTag()) { - .Pointer => { - const ptr_elem_type = typed_value.ty.elemType(); - switch (ptr_elem_type.zigTypeTag()) { - .Array => { - // TODO more checks to make sure this can be emitted as a string literal - //const array_elem_type = ptr_elem_type.elemType(); - //if (array_elem_type.eql(Type.initTag(.u8)) and - // ptr_elem_type.hasSentinel(Value.initTag(.zero))) - //{ - //} - const bytes = typed_value.val.toAllocatedBytes(allocator) catch |err| switch (err) { - error.AnalysisFail => unreachable, - else => |e| return e, - }; - return self.emitStringLiteral(src, bytes); - }, - else => |t| std.debug.panic("TODO implement emitTypedValue for pointer to {s}", .{@tagName(t)}), - } - }, - .ComptimeInt => return self.emitComptimeIntVal(src, typed_value.val), - .Int => { - const as_inst = try self.arena.allocator.create(Inst.BinOp); - as_inst.* = .{ - .base = .{ - .tag = .as, - .src = src, - }, - .positionals = .{ - .lhs = (try self.emitType(src, typed_value.ty)).inst, - .rhs = (try self.emitComptimeIntVal(src, typed_value.val)).inst, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&as_inst.base); - }, - .Type => { - const ty = try typed_value.val.toType(&self.arena.allocator); - return self.emitType(src, ty); - }, - .Fn => { - const module_fn = typed_value.val.castTag(.function).?.data; - return self.emitFn(module_fn, src, typed_value.ty); - }, - .Array => { - // TODO more checks to make sure this can be emitted as a string literal - //const array_elem_type = ptr_elem_type.elemType(); - //if (array_elem_type.eql(Type.initTag(.u8)) and - // ptr_elem_type.hasSentinel(Value.initTag(.zero))) - //{ - //} - const bytes = typed_value.val.toAllocatedBytes(allocator) catch |err| switch (err) { - error.AnalysisFail => unreachable, - else => |e| return e, - }; - const str_inst = try self.arena.allocator.create(Inst.Str); - str_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Str.base_tag, - }, - .positionals = .{ - .bytes = bytes, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&str_inst.base); - }, - .Void => return self.emitPrimitive(src, .void_value), - .Bool => if (typed_value.val.toBool()) - return self.emitPrimitive(src, .@"true") - else - return self.emitPrimitive(src, .@"false"), - .EnumLiteral => { - const enum_literal = typed_value.val.castTag(.enum_literal).?; - const inst = try self.arena.allocator.create(Inst.Str); - inst.* = .{ - .base = .{ - .src = src, - .tag = .enum_literal, - }, - .positionals = .{ - .bytes = enum_literal.data, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&inst.base); - }, - else => |t| std.debug.panic("TODO implement emitTypedValue for {s}", .{@tagName(t)}), - } - } - - fn emitNoOp(self: *EmitZIR, src: usize, old_inst: *ir.Inst.NoOp, tag: Inst.Tag) Allocator.Error!*Inst { - const new_inst = try self.arena.allocator.create(Inst.NoOp); - new_inst.* = .{ - .base = .{ - .src = src, - .tag = tag, - }, - .positionals = .{}, - .kw_args = .{}, - }; - return &new_inst.base; - } - - fn emitUnOp( - self: *EmitZIR, - src: usize, - new_body: ZirBody, - old_inst: *ir.Inst.UnOp, - tag: Inst.Tag, - ) Allocator.Error!*Inst { - const new_inst = try self.arena.allocator.create(Inst.UnOp); - new_inst.* = .{ - .base = .{ - .src = src, - .tag = tag, - }, - .positionals = .{ - .operand = try self.resolveInst(new_body, old_inst.operand), - }, - .kw_args = .{}, - }; - return &new_inst.base; - } - - fn emitBinOp( - self: *EmitZIR, - src: usize, - new_body: ZirBody, - old_inst: *ir.Inst.BinOp, 
- tag: Inst.Tag, - ) Allocator.Error!*Inst { - const new_inst = try self.arena.allocator.create(Inst.BinOp); - new_inst.* = .{ - .base = .{ - .src = src, - .tag = tag, - }, - .positionals = .{ - .lhs = try self.resolveInst(new_body, old_inst.lhs), - .rhs = try self.resolveInst(new_body, old_inst.rhs), - }, - .kw_args = .{}, - }; - return &new_inst.base; - } - - fn emitCast( - self: *EmitZIR, - src: usize, - new_body: ZirBody, - old_inst: *ir.Inst.UnOp, - tag: Inst.Tag, - ) Allocator.Error!*Inst { - const new_inst = try self.arena.allocator.create(Inst.BinOp); - new_inst.* = .{ - .base = .{ - .src = src, - .tag = tag, - }, - .positionals = .{ - .lhs = (try self.emitType(old_inst.base.src, old_inst.base.ty)).inst, - .rhs = try self.resolveInst(new_body, old_inst.operand), - }, - .kw_args = .{}, - }; - return &new_inst.base; - } - - fn emitBody( - self: *EmitZIR, - body: ir.Body, - inst_table: *std.AutoHashMap(*ir.Inst, *Inst), - instructions: *std.ArrayList(*Inst), - ) Allocator.Error!void { - const new_body = ZirBody{ - .inst_table = inst_table, - .instructions = instructions, - }; - for (body.instructions) |inst| { - const new_inst = switch (inst.tag) { - .constant => unreachable, // excluded from function bodies - - .breakpoint => try self.emitNoOp(inst.src, inst.castTag(.breakpoint).?, .breakpoint), - .unreach => try self.emitNoOp(inst.src, inst.castTag(.unreach).?, .unreach_nocheck), - .retvoid => try self.emitNoOp(inst.src, inst.castTag(.retvoid).?, .returnvoid), - .dbg_stmt => try self.emitNoOp(inst.src, inst.castTag(.dbg_stmt).?, .dbg_stmt), - - .not => try self.emitUnOp(inst.src, new_body, inst.castTag(.not).?, .boolnot), - .ret => try self.emitUnOp(inst.src, new_body, inst.castTag(.ret).?, .@"return"), - .ptrtoint => try self.emitUnOp(inst.src, new_body, inst.castTag(.ptrtoint).?, .ptrtoint), - .isnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnull).?, .isnull), - .isnonnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnonnull).?, .isnonnull), - .iserr => try self.emitUnOp(inst.src, new_body, inst.castTag(.iserr).?, .iserr), - .load => try self.emitUnOp(inst.src, new_body, inst.castTag(.load).?, .deref), - .ref => try self.emitUnOp(inst.src, new_body, inst.castTag(.ref).?, .ref), - .unwrap_optional => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional).?, .unwrap_optional_unsafe), - .wrap_optional => try self.emitCast(inst.src, new_body, inst.castTag(.wrap_optional).?, .as), - - .add => try self.emitBinOp(inst.src, new_body, inst.castTag(.add).?, .add), - .sub => try self.emitBinOp(inst.src, new_body, inst.castTag(.sub).?, .sub), - .store => try self.emitBinOp(inst.src, new_body, inst.castTag(.store).?, .store), - .cmp_lt => try self.emitBinOp(inst.src, new_body, inst.castTag(.cmp_lt).?, .cmp_lt), - .cmp_lte => try self.emitBinOp(inst.src, new_body, inst.castTag(.cmp_lte).?, .cmp_lte), - .cmp_eq => try self.emitBinOp(inst.src, new_body, inst.castTag(.cmp_eq).?, .cmp_eq), - .cmp_gte => try self.emitBinOp(inst.src, new_body, inst.castTag(.cmp_gte).?, .cmp_gte), - .cmp_gt => try self.emitBinOp(inst.src, new_body, inst.castTag(.cmp_gt).?, .cmp_gt), - .cmp_neq => try self.emitBinOp(inst.src, new_body, inst.castTag(.cmp_neq).?, .cmp_neq), - .booland => try self.emitBinOp(inst.src, new_body, inst.castTag(.booland).?, .booland), - .boolor => try self.emitBinOp(inst.src, new_body, inst.castTag(.boolor).?, .boolor), - .bitand => try self.emitBinOp(inst.src, new_body, inst.castTag(.bitand).?, .bitand), - .bitor => try self.emitBinOp(inst.src, 
new_body, inst.castTag(.bitor).?, .bitor), - .xor => try self.emitBinOp(inst.src, new_body, inst.castTag(.xor).?, .xor), - - .bitcast => try self.emitCast(inst.src, new_body, inst.castTag(.bitcast).?, .bitcast), - .intcast => try self.emitCast(inst.src, new_body, inst.castTag(.intcast).?, .intcast), - .floatcast => try self.emitCast(inst.src, new_body, inst.castTag(.floatcast).?, .floatcast), - - .alloc => blk: { - const new_inst = try self.arena.allocator.create(Inst.UnOp); - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = .alloc, - }, - .positionals = .{ - .operand = (try self.emitType(inst.src, inst.ty)).inst, - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - - .arg => blk: { - const old_inst = inst.castTag(.arg).?; - const new_inst = try self.arena.allocator.create(Inst.Arg); - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = .arg, - }, - .positionals = .{ - .name = try self.arena.allocator.dupe(u8, mem.spanZ(old_inst.name)), - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - - .block => blk: { - const old_inst = inst.castTag(.block).?; - const new_inst = try self.arena.allocator.create(Inst.Block); - - try self.block_table.put(old_inst, new_inst); - - var block_body = std.ArrayList(*Inst).init(self.allocator); - defer block_body.deinit(); - - try self.emitBody(old_inst.body, inst_table, &block_body); - - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.Block.base_tag, - }, - .positionals = .{ - .body = .{ .instructions = block_body.toOwnedSlice() }, - }, - .kw_args = .{}, - }; - - break :blk &new_inst.base; - }, - - .loop => blk: { - const old_inst = inst.castTag(.loop).?; - const new_inst = try self.arena.allocator.create(Inst.Loop); - - try self.loop_table.put(old_inst, new_inst); - - var loop_body = std.ArrayList(*Inst).init(self.allocator); - defer loop_body.deinit(); - - try self.emitBody(old_inst.body, inst_table, &loop_body); - - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.Loop.base_tag, - }, - .positionals = .{ - .body = .{ .instructions = loop_body.toOwnedSlice() }, - }, - .kw_args = .{}, - }; - - break :blk &new_inst.base; - }, - - .brvoid => blk: { - const old_inst = inst.cast(ir.Inst.BrVoid).?; - const new_block = self.block_table.get(old_inst.block).?; - const new_inst = try self.arena.allocator.create(Inst.BreakVoid); - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.BreakVoid.base_tag, - }, - .positionals = .{ - .block = new_block, - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - - .br => blk: { - const old_inst = inst.castTag(.br).?; - const new_block = self.block_table.get(old_inst.block).?; - const new_inst = try self.arena.allocator.create(Inst.Break); - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.Break.base_tag, - }, - .positionals = .{ - .block = new_block, - .operand = try self.resolveInst(new_body, old_inst.operand), - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - - .call => blk: { - const old_inst = inst.castTag(.call).?; - const new_inst = try self.arena.allocator.create(Inst.Call); - - const args = try self.arena.allocator.alloc(*Inst, old_inst.args.len); - for (args) |*elem, i| { - elem.* = try self.resolveInst(new_body, old_inst.args[i]); - } - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.Call.base_tag, - }, - .positionals = .{ - .func = try self.resolveInst(new_body, old_inst.func), - .args = args, - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - - .assembly => blk: { - const old_inst = 
inst.castTag(.assembly).?; - const new_inst = try self.arena.allocator.create(Inst.Asm); - - const inputs = try self.arena.allocator.alloc(*Inst, old_inst.inputs.len); - for (inputs) |*elem, i| { - elem.* = (try self.emitStringLiteral(inst.src, old_inst.inputs[i])).inst; - } - - const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.clobbers.len); - for (clobbers) |*elem, i| { - elem.* = (try self.emitStringLiteral(inst.src, old_inst.clobbers[i])).inst; - } - - const args = try self.arena.allocator.alloc(*Inst, old_inst.args.len); - for (args) |*elem, i| { - elem.* = try self.resolveInst(new_body, old_inst.args[i]); - } - - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.Asm.base_tag, - }, - .positionals = .{ - .asm_source = (try self.emitStringLiteral(inst.src, old_inst.asm_source)).inst, - .return_type = (try self.emitType(inst.src, inst.ty)).inst, - }, - .kw_args = .{ - .@"volatile" = old_inst.is_volatile, - .output = if (old_inst.output) |o| - (try self.emitStringLiteral(inst.src, o)).inst - else - null, - .inputs = inputs, - .clobbers = clobbers, - .args = args, - }, - }; - break :blk &new_inst.base; - }, - - .condbr => blk: { - const old_inst = inst.castTag(.condbr).?; - - var then_body = std.ArrayList(*Inst).init(self.allocator); - var else_body = std.ArrayList(*Inst).init(self.allocator); - - defer then_body.deinit(); - defer else_body.deinit(); - - const then_deaths = try self.arena.allocator.alloc(*Inst, old_inst.thenDeaths().len); - const else_deaths = try self.arena.allocator.alloc(*Inst, old_inst.elseDeaths().len); - - for (old_inst.thenDeaths()) |death, i| { - then_deaths[i] = try self.resolveInst(new_body, death); - } - for (old_inst.elseDeaths()) |death, i| { - else_deaths[i] = try self.resolveInst(new_body, death); - } - - try self.emitBody(old_inst.then_body, inst_table, &then_body); - try self.emitBody(old_inst.else_body, inst_table, &else_body); - - const new_inst = try self.arena.allocator.create(Inst.CondBr); - - try self.body_metadata.put(&new_inst.positionals.then_body, .{ .deaths = then_deaths }); - try self.body_metadata.put(&new_inst.positionals.else_body, .{ .deaths = else_deaths }); - - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.CondBr.base_tag, - }, - .positionals = .{ - .condition = try self.resolveInst(new_body, old_inst.condition), - .then_body = .{ .instructions = then_body.toOwnedSlice() }, - .else_body = .{ .instructions = else_body.toOwnedSlice() }, - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - .switchbr => blk: { - const old_inst = inst.castTag(.switchbr).?; - const cases = try self.arena.allocator.alloc(Inst.SwitchBr.Case, old_inst.cases.len); - const new_inst = try self.arena.allocator.create(Inst.SwitchBr); - new_inst.* = .{ - .base = .{ - .src = inst.src, - .tag = Inst.SwitchBr.base_tag, - }, - .positionals = .{ - .target_ptr = try self.resolveInst(new_body, old_inst.target_ptr), - .cases = cases, - .items = &[_]*Inst{}, // TODO this should actually be populated - .else_body = undefined, // populated below - }, - .kw_args = .{}, - }; - - var body_tmp = std.ArrayList(*Inst).init(self.allocator); - defer body_tmp.deinit(); - - for (old_inst.cases) |*case, i| { - body_tmp.items.len = 0; - - const case_deaths = try self.arena.allocator.alloc(*Inst, old_inst.caseDeaths(i).len); - for (old_inst.caseDeaths(i)) |death, j| { - case_deaths[j] = try self.resolveInst(new_body, death); - } - try self.body_metadata.put(&cases[i].body, .{ .deaths = case_deaths }); - - try self.emitBody(case.body, 
inst_table, &body_tmp); - const item = (try self.emitTypedValue(inst.src, .{ - .ty = old_inst.target_ptr.ty.elemType(), - .val = case.item, - })).inst; - - cases[i] = .{ - .item = item, - .body = .{ .instructions = try self.arena.allocator.dupe(*Inst, body_tmp.items) }, - }; - } - { // else - const else_deaths = try self.arena.allocator.alloc(*Inst, old_inst.elseDeaths().len); - for (old_inst.elseDeaths()) |death, j| { - else_deaths[j] = try self.resolveInst(new_body, death); - } - try self.body_metadata.put(&new_inst.positionals.else_body, .{ .deaths = else_deaths }); - - body_tmp.items.len = 0; - try self.emitBody(old_inst.else_body, inst_table, &body_tmp); - new_inst.positionals.else_body = .{ - .instructions = try self.arena.allocator.dupe(*Inst, body_tmp.items), - }; - } - - break :blk &new_inst.base; - }, - .varptr => @panic("TODO"), - }; - try self.metadata.put(new_inst, .{ - .deaths = inst.deaths, - .addr = @ptrToInt(inst), - }); - try instructions.append(new_inst); - try inst_table.put(inst, new_inst); - } - } - - fn emitType(self: *EmitZIR, src: usize, ty: Type) Allocator.Error!*Decl { - switch (ty.tag()) { - .i8 => return self.emitPrimitive(src, .i8), - .u8 => return self.emitPrimitive(src, .u8), - .i16 => return self.emitPrimitive(src, .i16), - .u16 => return self.emitPrimitive(src, .u16), - .i32 => return self.emitPrimitive(src, .i32), - .u32 => return self.emitPrimitive(src, .u32), - .i64 => return self.emitPrimitive(src, .i64), - .u64 => return self.emitPrimitive(src, .u64), - .isize => return self.emitPrimitive(src, .isize), - .usize => return self.emitPrimitive(src, .usize), - .c_short => return self.emitPrimitive(src, .c_short), - .c_ushort => return self.emitPrimitive(src, .c_ushort), - .c_int => return self.emitPrimitive(src, .c_int), - .c_uint => return self.emitPrimitive(src, .c_uint), - .c_long => return self.emitPrimitive(src, .c_long), - .c_ulong => return self.emitPrimitive(src, .c_ulong), - .c_longlong => return self.emitPrimitive(src, .c_longlong), - .c_ulonglong => return self.emitPrimitive(src, .c_ulonglong), - .c_longdouble => return self.emitPrimitive(src, .c_longdouble), - .c_void => return self.emitPrimitive(src, .c_void), - .f16 => return self.emitPrimitive(src, .f16), - .f32 => return self.emitPrimitive(src, .f32), - .f64 => return self.emitPrimitive(src, .f64), - .f128 => return self.emitPrimitive(src, .f128), - .anyerror => return self.emitPrimitive(src, .anyerror), - else => switch (ty.zigTypeTag()) { - .Bool => return self.emitPrimitive(src, .bool), - .Void => return self.emitPrimitive(src, .void), - .NoReturn => return self.emitPrimitive(src, .noreturn), - .Type => return self.emitPrimitive(src, .type), - .ComptimeInt => return self.emitPrimitive(src, .comptime_int), - .ComptimeFloat => return self.emitPrimitive(src, .comptime_float), - .Fn => { - const param_types = try self.allocator.alloc(Type, ty.fnParamLen()); - defer self.allocator.free(param_types); - - ty.fnParamTypes(param_types); - const emitted_params = try self.arena.allocator.alloc(*Inst, param_types.len); - for (param_types) |param_type, i| { - emitted_params[i] = (try self.emitType(src, param_type)).inst; - } - - const fntype_inst = try self.arena.allocator.create(Inst.FnType); - fntype_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.FnType.base_tag, - }, - .positionals = .{ - .param_types = emitted_params, - .return_type = (try self.emitType(src, ty.fnReturnType())).inst, - }, - .kw_args = .{ - .cc = ty.fnCallingConvention(), - }, - }; - return 
self.emitUnnamedDecl(&fntype_inst.base); - }, - .Int => { - const info = ty.intInfo(self.old_module.getTarget()); - const signed = try self.emitPrimitive(src, switch (info.signedness) { - .signed => .@"true", - .unsigned => .@"false", - }); - const bits_val = try Value.Tag.int_u64.create(&self.arena.allocator, info.bits); - const bits = try self.emitComptimeIntVal(src, bits_val); - const inttype_inst = try self.arena.allocator.create(Inst.IntType); - inttype_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.IntType.base_tag, - }, - .positionals = .{ - .signed = signed.inst, - .bits = bits.inst, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&inttype_inst.base); - }, - .Pointer => { - if (ty.isSinglePointer()) { - const inst = try self.arena.allocator.create(Inst.UnOp); - const tag: Inst.Tag = if (ty.isConstPtr()) .single_const_ptr_type else .single_mut_ptr_type; - inst.* = .{ - .base = .{ - .src = src, - .tag = tag, - }, - .positionals = .{ - .operand = (try self.emitType(src, ty.elemType())).inst, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&inst.base); - } else { - std.debug.panic("TODO implement emitType for {}", .{ty}); - } - }, - .Optional => { - var buf: Type.Payload.ElemType = undefined; - const inst = try self.arena.allocator.create(Inst.UnOp); - inst.* = .{ - .base = .{ - .src = src, - .tag = .optional_type, - }, - .positionals = .{ - .operand = (try self.emitType(src, ty.optionalChild(&buf))).inst, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&inst.base); - }, - .Array => { - var len_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = ty.arrayLen(), - }; - const len = Value.initPayload(&len_pl.base); - - const inst = if (ty.sentinel()) |sentinel| blk: { - const inst = try self.arena.allocator.create(Inst.ArrayTypeSentinel); - inst.* = .{ - .base = .{ - .src = src, - .tag = .array_type, - }, - .positionals = .{ - .len = (try self.emitTypedValue(src, .{ - .ty = Type.initTag(.usize), - .val = len, - })).inst, - .sentinel = (try self.emitTypedValue(src, .{ - .ty = ty.elemType(), - .val = sentinel, - })).inst, - .elem_type = (try self.emitType(src, ty.elemType())).inst, - }, - .kw_args = .{}, - }; - break :blk &inst.base; - } else blk: { - const inst = try self.arena.allocator.create(Inst.BinOp); - inst.* = .{ - .base = .{ - .src = src, - .tag = .array_type, - }, - .positionals = .{ - .lhs = (try self.emitTypedValue(src, .{ - .ty = Type.initTag(.usize), - .val = len, - })).inst, - .rhs = (try self.emitType(src, ty.elemType())).inst, - }, - .kw_args = .{}, - }; - break :blk &inst.base; - }; - return self.emitUnnamedDecl(inst); - }, - else => std.debug.panic("TODO implement emitType for {}", .{ty}), - }, - } - } - - fn autoName(self: *EmitZIR) ![]u8 { - while (true) { - const proposed_name = try std.fmt.allocPrint(&self.arena.allocator, "unnamed${d}", .{self.next_auto_name}); - self.next_auto_name += 1; - const gop = try self.names.getOrPut(proposed_name); - if (!gop.found_existing) { - gop.entry.value = {}; - return proposed_name; - } - } - } - - fn emitPrimitive(self: *EmitZIR, src: usize, tag: Inst.Primitive.Builtin) !*Decl { - const gop = try self.primitive_table.getOrPut(tag); - if (!gop.found_existing) { - const primitive_inst = try self.arena.allocator.create(Inst.Primitive); - primitive_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Primitive.base_tag, - }, - .positionals = .{ - .tag = tag, - }, - .kw_args = .{}, - }; - gop.entry.value = try self.emitUnnamedDecl(&primitive_inst.base); - } - return gop.entry.value; - } 
- - fn emitStringLiteral(self: *EmitZIR, src: usize, str: []const u8) !*Decl { - const str_inst = try self.arena.allocator.create(Inst.Str); - str_inst.* = .{ - .base = .{ - .src = src, - .tag = Inst.Str.base_tag, - }, - .positionals = .{ - .bytes = str, - }, - .kw_args = .{}, - }; - return self.emitUnnamedDecl(&str_inst.base); - } - - fn emitUnnamedDecl(self: *EmitZIR, inst: *Inst) !*Decl { - const decl = try self.arena.allocator.create(Decl); - decl.* = .{ - .name = try self.autoName(), - .contents_hash = undefined, - .inst = inst, - }; - try self.decls.append(self.allocator, decl); - return decl; - } -}; - /// For debugging purposes, like dumpFn but for unanalyzed zir blocks pub fn dumpZir(allocator: *Allocator, kind: []const u8, decl_name: [*:0]const u8, instructions: []*Inst) !void { var fib = std.heap.FixedBufferAllocator.init(&[_]u8{}); var module = Module{ - .decls = &[_]*Decl{}, + .decls = &[_]*Module.Decl{}, .arena = std.heap.ArenaAllocator.init(&fib.allocator), .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(&fib.allocator), - .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(&fib.allocator), + .body_metadata = std.AutoHashMap(*Body, Module.BodyMetaData).init(&fib.allocator), }; var write = Writer{ .module = &module, diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 6302ab29e4..36eb5f4239 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -63,7 +63,6 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! .declref => return analyzeInstDeclRef(mod, scope, old_inst.castTag(.declref).?), .declref_str => return analyzeInstDeclRefStr(mod, scope, old_inst.castTag(.declref_str).?), .declval => return analyzeInstDeclVal(mod, scope, old_inst.castTag(.declval).?), - .declval_in_module => return analyzeInstDeclValInModule(mod, scope, old_inst.castTag(.declval_in_module).?), .ensure_result_used => return analyzeInstEnsureResultUsed(mod, scope, old_inst.castTag(.ensure_result_used).?), .ensure_result_non_error => return analyzeInstEnsureResultNonError(mod, scope, old_inst.castTag(.ensure_result_non_error).?), .ensure_indexable => return analyzeInstEnsureIndexable(mod, scope, old_inst.castTag(.ensure_indexable).?), @@ -166,7 +165,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! 
} } -pub fn analyzeBody(mod: *Module, block: *Scope.Block, body: zir.Module.Body) !void { +pub fn analyzeBody(mod: *Module, block: *Scope.Block, body: zir.Body) !void { const tracy = trace(@src()); defer tracy.end(); @@ -183,7 +182,7 @@ pub fn analyzeBodyValueAsType( mod: *Module, block_scope: *Scope.Block, zir_result_inst: *zir.Inst, - body: zir.Module.Body, + body: zir.Body, ) !Type { try analyzeBody(mod, block_scope, body); const result_inst = block_scope.inst_table.get(zir_result_inst).?; @@ -191,84 +190,6 @@ pub fn analyzeBodyValueAsType( return val.toType(block_scope.base.arena()); } -pub fn analyzeZirDecl(mod: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError!bool { - var decl_scope: Scope.DeclAnalysis = .{ - .decl = decl, - .arena = std.heap.ArenaAllocator.init(mod.gpa), - }; - errdefer decl_scope.arena.deinit(); - - decl.analysis = .in_progress; - - const typed_value = try analyzeConstInst(mod, &decl_scope.base, src_decl.inst); - const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State); - - var prev_type_has_bits = false; - var type_changed = true; - - if (decl.typedValueManaged()) |tvm| { - prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits(); - type_changed = !tvm.typed_value.ty.eql(typed_value.ty); - - tvm.deinit(mod.gpa); - } - - arena_state.* = decl_scope.arena.state; - decl.typed_value = .{ - .most_recent = .{ - .typed_value = typed_value, - .arena = arena_state, - }, - }; - decl.analysis = .complete; - decl.generation = mod.generation; - if (typed_value.ty.hasCodeGenBits()) { - // We don't fully codegen the decl until later, but we do need to reserve a global - // offset table index for it. This allows us to codegen decls out of dependency order, - // increasing how many computations can be done in parallel. - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - } else if (prev_type_has_bits) { - mod.comp.bin_file.freeDecl(decl); - } - - return type_changed; -} - -pub fn resolveZirDecl(mod: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!*Decl { - const zir_module = mod.root_scope.cast(Scope.ZIRModule).?; - const entry = zir_module.contents.module.findDecl(src_decl.name).?; - return resolveZirDeclHavingIndex(mod, scope, src_decl, entry.index); -} - -fn resolveZirDeclHavingIndex(mod: *Module, scope: *Scope, src_decl: *zir.Decl, src_index: usize) InnerError!*Decl { - const name_hash = scope.namespace().fullyQualifiedNameHash(src_decl.name); - const decl = mod.decl_table.get(name_hash).?; - decl.src_index = src_index; - try mod.ensureDeclAnalyzed(decl); - return decl; -} - -/// Declares a dependency on the decl. -fn resolveCompleteZirDecl(mod: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!*Decl { - const decl = try resolveZirDecl(mod, scope, src_decl); - switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - .outdated => unreachable, - - .dependency_failure, - .sema_failure, - .sema_failure_retryable, - .codegen_failure, - .codegen_failure_retryable, - => return error.AnalysisFail, - - .complete => {}, - } - return decl; -} - pub fn resolveInst(mod: *Module, scope: *Scope, zir_inst: *zir.Inst) InnerError!*Inst { const block = scope.cast(Scope.Block).?; return block.inst_table.get(zir_inst).?; // Instruction does not dominate all uses! 
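// (A miss in inst_table here means the ZIR instruction was never
// analyzed in a block that dominates this use, hence the panic
// message on the `.?` unwrap above.)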
@@ -640,22 +561,28 @@ fn analyzeInstCompileError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) In } fn analyzeInstCompileLog(mod: *Module, scope: *Scope, inst: *zir.Inst.CompileLog) InnerError!*Inst { - std.debug.print("| ", .{}); - for (inst.positionals.to_log) |item, i| { - const to_log = try resolveInst(mod, scope, item); - if (to_log.value()) |val| { - std.debug.print("{}", .{val}); + var managed = mod.compile_log_text.toManaged(mod.gpa); + defer mod.compile_log_text = managed.moveToUnmanaged(); + const writer = managed.writer(); + + for (inst.positionals.to_log) |arg_inst, i| { + if (i != 0) try writer.print(", ", .{}); + + const arg = try resolveInst(mod, scope, arg_inst); + if (arg.value()) |val| { + try writer.print("@as({}, {})", .{ arg.ty, val }); } else { - std.debug.print("(runtime value)", .{}); + try writer.print("@as({}, [runtime value])", .{arg.ty}); } - if (i != inst.positionals.to_log.len - 1) std.debug.print(", ", .{}); } - std.debug.print("\n", .{}); - if (!inst.kw_args.seen) { + try writer.print("\n", .{}); - // so that we do not give multiple compile errors if it gets evaled twice - inst.kw_args.seen = true; - try mod.failCompileLog(scope, inst.base.src); + const gop = try mod.compile_log_decls.getOrPut(mod.gpa, scope.ownerDecl().?); + if (!gop.found_existing) { + gop.entry.value = .{ + .file_scope = scope.getFileScope(), + .byte_offset = inst.base.src, + }; } return mod.constVoid(scope, inst.base.src); } @@ -705,7 +632,8 @@ fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, .inlining = parent_block.inlining, @@ -732,7 +660,8 @@ fn analyzeInstBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_c .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, .label = null, @@ -744,13 +673,14 @@ fn analyzeInstBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_c try analyzeBody(mod, &child_block, inst.positionals.body); - try parent_block.instructions.appendSlice(mod.gpa, child_block.instructions.items); - - // comptime blocks won't generate any runtime values - if (child_block.instructions.items.len == 0) - return mod.constVoid(scope, inst.base.src); + // Move the analyzed instructions into the parent block arena. + const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items); + try parent_block.instructions.appendSlice(mod.gpa, copied_instructions); - return parent_block.instructions.items[parent_block.instructions.items.len - 1]; + // The result of a flat block is the last instruction. 
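+        // (Resolving it via inst_table also covers all-comptime bodies,
+        // which previously had to synthesize a void result because no
+        // runtime instructions were appended.)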
+ const zir_inst_list = inst.positionals.body.instructions; + const last_zir_inst = zir_inst_list[zir_inst_list.len - 1]; + return resolveInst(mod, scope, last_zir_inst); } fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: bool) InnerError!*Inst { @@ -775,7 +705,8 @@ fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_compt .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, // TODO @as here is working around a stage1 miscompilation bug :( @@ -890,22 +821,15 @@ fn analyzeInstDeclRefStr(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRefStr fn analyzeInstDeclRef(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRef) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.analyzeDeclRefByName(scope, inst.base.src, inst.positionals.name); + return mod.analyzeDeclRef(scope, inst.base.src, inst.positionals.decl); } fn analyzeInstDeclVal(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - const decl = try analyzeDeclVal(mod, scope, inst); - const ptr = try mod.analyzeDeclRef(scope, inst.base.src, decl); - return mod.analyzeDeref(scope, inst.base.src, ptr, inst.base.src); -} - -fn analyzeInstDeclValInModule(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclValInModule) InnerError!*Inst { - const tracy = trace(@src()); - defer tracy.end(); - const decl = inst.positionals.decl; - return mod.analyzeDeclRef(scope, inst.base.src, decl); + const decl_ref = try mod.analyzeDeclRef(scope, inst.base.src, inst.positionals.decl); + // TODO look into avoiding the call to analyzeDeref here + return mod.analyzeDeref(scope, inst.base.src, decl_ref, inst.base.src); } fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst { @@ -1032,9 +956,8 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError .parent = null, .inst_table = &inst_table, .func = module_fn, - // Note that we pass the caller's Decl, not the callee. This causes - // compile errors to be attached (correctly) to the caller's Decl. - .decl = scope.decl().?, + .owner_decl = scope.ownerDecl().?, + .src_decl = module_fn.owner_decl, .instructions = .{}, .arena = scope.arena(), .label = null, @@ -1069,7 +992,7 @@ fn analyzeInstFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError! 
.state = if (fn_inst.kw_args.is_inline) .inline_only else .queued, .zir = fn_inst.positionals.body, .body = undefined, - .owner_decl = scope.decl().?, + .owner_decl = scope.ownerDecl().?, }; return mod.constInst(scope, fn_inst.base.src, .{ .ty = fn_type, @@ -1391,7 +1314,7 @@ fn analyzeInstFieldPtr(mod: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr return mod.analyzeDeclRef(scope, fieldptr.base.src, decl); } - if (&container_scope.file_scope.base == mod.root_scope) { + if (container_scope.file_scope == mod.root_scope) { return mod.fail(scope, fieldptr.base.src, "root source file has no member called '{s}'", .{field_name}); } else { return mod.fail(scope, fieldptr.base.src, "container '{}' has no member called '{s}'", .{ child_type, field_name }); @@ -1606,7 +1529,8 @@ fn analyzeInstSwitchBr(mod: *Module, scope: *Scope, inst: *zir.Inst.SwitchBr) In .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, .inlining = parent_block.inlining, @@ -2182,7 +2106,8 @@ fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerE .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, .inlining = parent_block.inlining, @@ -2196,7 +2121,8 @@ fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerE .parent = parent_block, .inst_table = parent_block.inst_table, .func = parent_block.func, - .decl = parent_block.decl, + .owner_decl = parent_block.owner_decl, + .src_decl = parent_block.src_decl, .instructions = .{}, .arena = parent_block.arena, .inlining = parent_block.inlining, @@ -2294,17 +2220,6 @@ fn analyzeBreak( } else unreachable; } -fn analyzeDeclVal(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerError!*Decl { - const decl_name = inst.positionals.name; - const zir_module = scope.namespace().cast(Scope.ZIRModule).?; - const src_decl = zir_module.contents.module.findDecl(decl_name) orelse - return mod.fail(scope, inst.base.src, "use of undeclared identifier '{s}'", .{decl_name}); - - const decl = try resolveCompleteZirDecl(mod, scope, src_decl.decl); - - return decl; -} - fn analyzeInstSimplePtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); diff --git a/test/stage2/test.zig b/test/stage2/test.zig index f25f07adbf..cb5e9bebbb 100644 --- a/test/stage2/test.zig +++ b/test/stage2/test.zig @@ -36,7 +36,7 @@ pub fn addCases(ctx: *TestContext) !void { { var case = ctx.exe("hello world with updates", linux_x64); - case.addError("", &[_][]const u8{"no entry point found"}); + case.addError("", &[_][]const u8{"error: no entry point found"}); // Incorrect return type case.addError( @@ -147,7 +147,7 @@ pub fn addCases(ctx: *TestContext) !void { { var case = ctx.exe("hello world with updates", macosx_x64); - case.addError("", &[_][]const u8{"no entry point found"}); + case.addError("", &[_][]const u8{"error: no entry point found"}); // Incorrect return type case.addError( @@ -1243,24 +1243,46 @@ pub fn addCases(ctx: *TestContext) !void { \\} , &[_][]const u8{":3:9: error: redefinition of 'testing'"}); } - ctx.compileError("compileLog", linux_x64, - 
\\export fn _start() noreturn { - \\ const b = true; - \\ var f: u32 = 1; - \\ @compileLog(b, 20, f, x); - \\ @compileLog(1000); - \\ var bruh: usize = true; - \\ unreachable; - \\} - \\fn x() void {} - , &[_][]const u8{ - ":4:3: error: found compile log statement", - ":5:3: error: found compile log statement", - ":6:21: error: expected usize, found bool", - }); - // TODO if this is here it invalidates the compile error checker: - // "| true, 20, (runtime value), (function)" - // "| 1000" + + { + // TODO make the test harness support checking the compile log output too + var case = ctx.obj("@compileLog", linux_x64); + // The other compile error prevents emission of a "found compile log" statement. + case.addError( + \\export fn _start() noreturn { + \\ const b = true; + \\ var f: u32 = 1; + \\ @compileLog(b, 20, f, x); + \\ @compileLog(1000); + \\ var bruh: usize = true; + \\ unreachable; + \\} + \\export fn other() void { + \\ @compileLog(1234); + \\} + \\fn x() void {} + , &[_][]const u8{ + ":6:23: error: expected usize, found bool", + }); + + // Now only compile log statements remain. One per Decl. + case.addError( + \\export fn _start() noreturn { + \\ const b = true; + \\ var f: u32 = 1; + \\ @compileLog(b, 20, f, x); + \\ @compileLog(1000); + \\ unreachable; + \\} + \\export fn other() void { + \\ @compileLog(1234); + \\} + \\fn x() void {} + , &[_][]const u8{ + ":11:8: error: found compile log statement", + ":4:5: note: also here", + }); + } { var case = ctx.obj("extern variable has no type", linux_x64); -- cgit v1.2.3 From e292f33de75fa00863a855161d6df9f6884df841 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 17 Jan 2021 10:37:23 +0100 Subject: stage2 aarch64: add basic genSetStack --- src/codegen.zig | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 9f2fbaab78..e491178549 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2758,6 +2758,69 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, }, + .aarch64, .aarch64_be, .aarch64_32 => switch (mcv) { + .dead => unreachable, + .ptr_stack_offset => unreachable, + .ptr_embedded_in_code => unreachable, + .unreach, .none => return, // Nothing to do. + .undef => { + if (!self.wantSafety()) + return; // The already existing value will do just fine. + // TODO Upgrade this to a memset call when we have that available. 
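+                    // Until then, each power-of-two size below re-enters genSetStack
+                    // with an all-0xaa immediate, the byte pattern Zig uses for
+                    // undefined memory, so stray reads stand out in a debugger.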
+ switch (ty.abiSize(self.target.*)) { + 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail(src, "TODO implement memset", .{}), + } + }, + .compare_flags_unsigned => |op| { + return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + }, + .compare_flags_signed => |op| { + return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + }, + .immediate => { + const reg = try self.copyToTmpRegister(src, mcv); + return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + }, + .embedded_in_code => |code_offset| { + return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); + }, + .register => |reg| { + const abi_size = ty.abiSize(self.target.*); + const adj_off = stack_offset + abi_size; + + switch (abi_size) { + 4, 8 => { + const offset = if (adj_off <= math.maxInt(u12)) blk: { + break :blk Instruction.LoadStoreOffset.imm(@intCast(u12, adj_off)); + } else Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + const rn: Register = switch (abi_size) { + 4 => .w29, + 8 => .x29, + else => unreachable, + }; + + writeInt(u32, try self.code.addManyAsArray(4), Instruction.str(reg, rn, .{ + .offset = offset, + }).toU32()); + }, + else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}), + } + }, + .memory => |vaddr| { + return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + }, + .stack_offset => |off| { + if (stack_offset == off) + return; // Copy stack variable to itself; nothing to do. + + const reg = try self.copyToTmpRegister(src, mcv); + return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + }, + }, else => return self.fail(src, "TODO implement getSetStack for {}", .{self.target.cpu.arch}), } } -- cgit v1.2.3 From b25cf7db0253f331f44b279fcbcdf71faa1afb92 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 17 Jan 2021 11:26:02 +0100 Subject: stage2 aarch64: add basic function pro/epilogue Fix typo in `nop` implementation. Simplify `aarch64` macOS tests. --- src/codegen.zig | 66 ++++++++++++++++++++++++++++++- src/codegen/aarch64.zig | 2 +- test/stage2/aarch64.zig | 101 +++++++++++------------------------------------- 3 files changed, 88 insertions(+), 81 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index e491178549..1478ede6ff 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -637,6 +637,67 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetEpilogueBegin(); } }, + .aarch64, .aarch64_be, .aarch64_32 => { + const cc = self.fn_type.fnCallingConvention(); + if (cc != .Naked) { + // TODO Finish function prologue and epilogue for aarch64. + // Reserve the stack for local variables, etc. + + // stp fp, lr, [sp, #-16]! 
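+                    // (Pre-indexed addressing decrements sp by 16 before the store,
+                    // so this single instruction pushes the fp/lr frame record.)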
+ writeInt(u32, try self.code.addManyAsArray(4), Instruction.stp( + .x29, + .x30, + Register.sp, + Instruction.LoadStorePairOffset.pre_index(-16), + ).toU32()); + + try self.dbgSetPrologueEnd(); + + try self.genBody(self.mod_fn.body); + + try self.dbgSetEpilogueBegin(); + + // exitlude jumps + if (self.exitlude_jump_relocs.items.len == 1) { + // There is only one relocation. Hence, + // this relocation must be at the end of + // the code. Therefore, we can just delete + // the space initially reserved for the + // jump + self.code.items.len -= 4; + } else for (self.exitlude_jump_relocs.items) |jmp_reloc| { + const amt = @intCast(i32, self.code.items.len) - @intCast(i32, jmp_reloc + 8); + if (amt == -4) { + // This return is at the end of the + // code block. We can't just delete + // the space because there may be + // other jumps we already relocated to + // the address. Instead, insert a nop + writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.nop().toU32()); + } else { + if (math.cast(i28, amt)) |offset| { + writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(offset).toU32()); + } else |err| { + return self.failSymbol("exitlude jump is too large", .{}); + } + } + } + + // ldp fp, lr, [sp], #16 + writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldp( + .x29, + .x30, + Register.sp, + Instruction.LoadStorePairOffset.post_index(16), + ).toU32()); + // ret lr + writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32()); + } else { + try self.dbgSetPrologueEnd(); + try self.genBody(self.mod_fn.body); + try self.dbgSetEpilogueBegin(); + } + }, else => { try self.dbgSetPrologueEnd(); try self.genBody(self.mod_fn.body); @@ -1962,8 +2023,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4); }, .aarch64 => { - // TODO: relocations - writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32()); + // Just add space for an instruction, patch this later + try self.code.resize(self.code.items.len + 4); + try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4); }, else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}), } diff --git a/src/codegen/aarch64.zig b/src/codegen/aarch64.zig index 5fba1ea7e1..5999f8888c 100644 --- a/src/codegen/aarch64.zig +++ b/src/codegen/aarch64.zig @@ -814,7 +814,7 @@ pub const Instruction = union(enum) { // Nop pub fn nop() Instruction { - return Instruction{ .NoOperation = {} }; + return Instruction{ .NoOperation = .{} }; } // Logical (shifted register) diff --git a/test/stage2/aarch64.zig b/test/stage2/aarch64.zig index fb2b240c4d..6c283f8e9f 100644 --- a/test/stage2/aarch64.zig +++ b/test/stage2/aarch64.zig @@ -17,97 +17,60 @@ pub fn addCases(ctx: *TestContext) !void { // Regular old hello world case.addCompareOutput( + \\extern "c" fn write(usize, usize, usize) void; + \\extern "c" fn exit(usize) noreturn; + \\ \\export fn _start() noreturn { \\ print(); \\ - \\ exit(); + \\ exit(0); \\} \\ \\fn print() void { - \\ asm volatile ("svc #0x80" - \\ : - \\ : [number] "{x16}" (4), - \\ [arg1] "{x0}" (1), - \\ [arg2] "{x1}" (@ptrToInt("Hello, World!\n")), - \\ [arg3] "{x2}" (14) - \\ : "memory" - \\ ); - \\ return; - \\} - \\ - \\fn exit() noreturn { - \\ asm volatile ("svc #0x80" - \\ : - \\ : [number] "{x16}" (1), - \\ [arg1] "{x0}" (0) - \\ : "memory" - \\ ); - \\ unreachable; + \\ const msg = @ptrToInt("Hello, World!\n"); + \\ const len = 14; + \\ write(1, msg, len); \\} , "Hello, 
World!\n", ); + // Now change the message only case.addCompareOutput( + \\extern "c" fn write(usize, usize, usize) void; + \\extern "c" fn exit(usize) noreturn; + \\ \\export fn _start() noreturn { \\ print(); \\ - \\ exit(); + \\ exit(0); \\} \\ \\fn print() void { - \\ asm volatile ("svc #0x80" - \\ : - \\ : [number] "{x16}" (4), - \\ [arg1] "{x0}" (1), - \\ [arg2] "{x1}" (@ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n")), - \\ [arg3] "{x2}" (104) - \\ : "memory" - \\ ); - \\ return; - \\} - \\ - \\fn exit() noreturn { - \\ asm volatile ("svc #0x80" - \\ : - \\ : [number] "{x16}" (1), - \\ [arg1] "{x0}" (0) - \\ : "memory" - \\ ); - \\ unreachable; + \\ const msg = @ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n"); + \\ const len = 104; + \\ write(1, msg, len); \\} , "What is up? This is a longer message that will force the data to be relocated in virtual address space.\n", ); + // Now we print it twice. case.addCompareOutput( + \\extern "c" fn write(usize, usize, usize) void; + \\extern "c" fn exit(usize) noreturn; + \\ \\export fn _start() noreturn { \\ print(); \\ print(); \\ - \\ exit(); + \\ exit(0); \\} \\ \\fn print() void { - \\ asm volatile ("svc #0x80" - \\ : - \\ : [number] "{x16}" (4), - \\ [arg1] "{x0}" (1), - \\ [arg2] "{x1}" (@ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n")), - \\ [arg3] "{x2}" (104) - \\ : "memory" - \\ ); - \\ return; - \\} - \\ - \\fn exit() noreturn { - \\ asm volatile ("svc #0x80" - \\ : - \\ : [number] "{x16}" (1), - \\ [arg1] "{x0}" (0) - \\ : "memory" - \\ ); - \\ unreachable; + \\ const msg = @ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n"); + \\ const len = 104; + \\ write(1, msg, len); \\} , \\What is up? This is a longer message that will force the data to be relocated in virtual address space. @@ -200,24 +163,6 @@ pub fn addCases(ctx: *TestContext) !void { ); } - { - var case = ctx.exe("hello world linked to libc", macos_aarch64); - - // TODO rewrite this test once we handle more int conversions and return args. - case.addCompareOutput( - \\extern "c" fn write(usize, usize, usize) void; - \\extern "c" fn exit(usize) noreturn; - \\ - \\export fn _start() noreturn { - \\ write(1, @ptrToInt("Hello,"), 6); - \\ write(1, @ptrToInt(" World!\n,"), 8); - \\ exit(0); - \\} - , - "Hello, World!\n", - ); - } - { var case = ctx.exe("only libc exit", macos_aarch64); -- cgit v1.2.3 From 81183365852e7f871500a3f71810ae3b468f0027 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 17 Jan 2021 17:20:55 +0100 Subject: macho: refactor undef symbol handling Now, we don't erroneously write to the string table on every write of global and undef symbols. --- src/codegen.zig | 18 +++------- src/link/MachO.zig | 89 ++++++++++++++++++++-------------------------- src/link/MachO/imports.zig | 81 +++++++++++++++++++---------------------- 3 files changed, 80 insertions(+), 108 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 1478ede6ff..430adc8137 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1920,21 +1920,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } else if (func_value.castTag(.extern_fn)) |func_payload| { const decl = func_payload.data; - // We don't free the decl_name immediately unless it already exists. 
- // If it doesn't, it will get autofreed when we clean up the extern symbol table. const decl_name = try std.fmt.allocPrint(self.bin_file.allocator, "_{s}", .{decl.name}); + defer self.bin_file.allocator.free(decl_name); const already_defined = macho_file.extern_lazy_symbols.contains(decl_name); - const symbol: u32 = if (macho_file.extern_lazy_symbols.getIndex(decl_name)) |index| blk: { - self.bin_file.allocator.free(decl_name); - break :blk @intCast(u32, index); - } else blk: { - const index = @intCast(u32, macho_file.extern_lazy_symbols.items().len); - try macho_file.extern_lazy_symbols.putNoClobber(self.bin_file.allocator, decl_name, .{ - .name = decl_name, - .dylib_ordinal = 1, // TODO this is now hardcoded, since we only support libSystem. - }); - break :blk index; - }; + const symbol: u32 = if (macho_file.extern_lazy_symbols.getIndex(decl_name)) |index| + @intCast(u32, index) + else + try macho_file.addExternSymbol(decl_name); const start = self.code.items.len; const len: usize = blk: { switch (arch) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 1491dc53e8..16e06e9a82 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1011,11 +1011,11 @@ pub fn deinit(self: *MachO) void { ds.deinit(self.base.allocator); } for (self.extern_lazy_symbols.items()) |*entry| { - entry.value.deinit(self.base.allocator); + self.base.allocator.free(entry.key); } self.extern_lazy_symbols.deinit(self.base.allocator); for (self.extern_nonlazy_symbols.items()) |*entry| { - entry.value.deinit(self.base.allocator); + self.base.allocator.free(entry.key); } self.extern_nonlazy_symbols.deinit(self.base.allocator); self.pie_fixups.deinit(self.base.allocator); @@ -2042,9 +2042,16 @@ pub fn populateMissingMetadata(self: *MachO) !void { } if (!self.extern_nonlazy_symbols.contains("dyld_stub_binder")) { const index = @intCast(u32, self.extern_nonlazy_symbols.items().len); - const name = try std.fmt.allocPrint(self.base.allocator, "dyld_stub_binder", .{}); + const name = try self.base.allocator.dupe(u8, "dyld_stub_binder"); + const offset = try self.makeString("dyld_stub_binder"); try self.extern_nonlazy_symbols.putNoClobber(self.base.allocator, name, .{ - .name = name, + .inner = .{ + .n_strx = offset, + .n_type = std.macho.N_UNDF | std.macho.N_EXT, + .n_sect = 0, + .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, + .n_value = 0, + }, .dylib_ordinal = 1, // TODO this is currently hardcoded. 
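            // Each extern symbol owns one pointer-sized slot in the segment
            // below, at index * @sizeOf(u64); since this symbol is non-lazy,
            // dyld binds the slot at load time.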
.segment = self.data_const_segment_cmd_index.?, .offset = index * @sizeOf(u64), @@ -2222,15 +2229,15 @@ pub fn makeStaticString(comptime bytes: []const u8) [16]u8 { return buf; } -pub fn makeString(self: *MachO, bytes: []const u8) !u32 { +fn makeString(self: *MachO, bytes: []const u8) !u32 { try self.string_table.ensureCapacity(self.base.allocator, self.string_table.items.len + bytes.len + 1); - const result = @intCast(u32, self.string_table.items.len); + const offset = @intCast(u32, self.string_table.items.len); self.string_table.appendSliceAssumeCapacity(bytes); self.string_table.appendAssumeCapacity(0); self.string_table_dirty = true; if (self.d_sym) |*ds| ds.string_table_dirty = true; - return result; + return offset; } fn getString(self: *MachO, str_off: u32) []const u8 { @@ -2246,6 +2253,23 @@ fn updateString(self: *MachO, old_str_off: u32, new_name: []const u8) !u32 { return self.makeString(new_name); } +pub fn addExternSymbol(self: *MachO, name: []const u8) !u32 { + const index = @intCast(u32, self.extern_lazy_symbols.items().len); + const offset = try self.makeString(name); + const sym_name = try self.base.allocator.dupe(u8, name); + try self.extern_lazy_symbols.putNoClobber(self.base.allocator, sym_name, .{ + .inner = .{ + .n_strx = offset, + .n_type = macho.N_UNDF | macho.N_EXT, + .n_sect = 0, + .n_desc = macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | macho.N_SYMBOL_RESOLVER, + .n_value = 0, + }, + .dylib_ordinal = 1, // TODO this is now hardcoded, since we only support libSystem. + }); + return index; +} + const NextSegmentAddressAndOffset = struct { address: u64, offset: u64, @@ -2585,24 +2609,10 @@ fn writeAllGlobalAndUndefSymbols(self: *MachO) !void { defer undefs.deinit(); try undefs.ensureCapacity(nundefs); for (self.extern_lazy_symbols.items()) |entry| { - const name = try self.makeString(entry.key); - undefs.appendAssumeCapacity(.{ - .n_strx = name, - .n_type = std.macho.N_UNDF | std.macho.N_EXT, - .n_sect = 0, - .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, - .n_value = 0, - }); + undefs.appendAssumeCapacity(entry.value.inner); } for (self.extern_nonlazy_symbols.items()) |entry| { - const name = try self.makeString(entry.key); - undefs.appendAssumeCapacity(.{ - .n_strx = name, - .n_type = std.macho.N_UNDF | std.macho.N_EXT, - .n_sect = 0, - .n_desc = std.macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY | std.macho.N_SYMBOL_RESOLVER, - .n_value = 0, - }); + undefs.appendAssumeCapacity(entry.value.inner); } const locals_off = symtab.symoff; @@ -2781,19 +2791,12 @@ fn writeRebaseInfoTable(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - var symbols = try self.base.allocator.alloc(*const ExternSymbol, self.extern_lazy_symbols.items().len); - defer self.base.allocator.free(symbols); - - for (self.extern_lazy_symbols.items()) |*entry, i| { - symbols[i] = &entry.value; - } - - const size = try rebaseInfoSize(symbols); + const size = try rebaseInfoSize(self.extern_lazy_symbols.items()); var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); defer self.base.allocator.free(buffer); var stream = std.io.fixedBufferStream(buffer); - try writeRebaseInfo(symbols, stream.writer()); + try writeRebaseInfo(self.extern_lazy_symbols.items(), stream.writer()); const linkedit_segment = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment; const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; @@ -2820,19 +2823,12 @@ fn writeBindingInfoTable(self: *MachO) !void { const tracy = 
trace(@src()); defer tracy.end(); - var symbols = try self.base.allocator.alloc(*const ExternSymbol, self.extern_nonlazy_symbols.items().len); - defer self.base.allocator.free(symbols); - - for (self.extern_nonlazy_symbols.items()) |*entry, i| { - symbols[i] = &entry.value; - } - - const size = try bindInfoSize(symbols); + const size = try bindInfoSize(self.extern_nonlazy_symbols.items()); var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); defer self.base.allocator.free(buffer); var stream = std.io.fixedBufferStream(buffer); - try writeBindInfo(symbols, stream.writer()); + try writeBindInfo(self.extern_nonlazy_symbols.items(), stream.writer()); const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment; const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; @@ -2856,19 +2852,12 @@ fn writeBindingInfoTable(self: *MachO) !void { fn writeLazyBindingInfoTable(self: *MachO) !void { if (!self.lazy_binding_info_dirty) return; - var symbols = try self.base.allocator.alloc(*const ExternSymbol, self.extern_lazy_symbols.items().len); - defer self.base.allocator.free(symbols); - - for (self.extern_lazy_symbols.items()) |*entry, i| { - symbols[i] = &entry.value; - } - - const size = try lazyBindInfoSize(symbols); + const size = try lazyBindInfoSize(self.extern_lazy_symbols.items()); var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size)); defer self.base.allocator.free(buffer); var stream = std.io.fixedBufferStream(buffer); - try writeLazyBindInfo(symbols, stream.writer()); + try writeLazyBindInfo(self.extern_lazy_symbols.items(), stream.writer()); const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment; const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; diff --git a/src/link/MachO/imports.zig b/src/link/MachO/imports.zig index c5f6211f1a..2bc34d14c5 100644 --- a/src/link/MachO/imports.zig +++ b/src/link/MachO/imports.zig @@ -7,12 +7,8 @@ const assert = std.debug.assert; const Allocator = mem.Allocator; pub const ExternSymbol = struct { - /// Symbol name. - /// We own the memory, therefore we'll need to free it by calling `deinit`. - /// In self-hosted, we don't expect it to be null ever. - /// However, this is for backwards compatibility with LLD when - /// we'll be patching things up post mortem. - name: ?[]u8 = null, + /// MachO symbol table entry. + inner: macho.nlist_64, /// Id of the dynamic library where the specified entries can be found. /// Id of 0 means self. @@ -26,22 +22,16 @@ pub const ExternSymbol = struct { /// Offset relative to the start address of the `segment`. 
offset: u32 = 0, - - pub fn deinit(self: *ExternSymbol, allocator: *Allocator) void { - if (self.name) |name| { - allocator.free(name); - } - } }; -pub fn rebaseInfoSize(symbols: []*const ExternSymbol) !u64 { +pub fn rebaseInfoSize(symbols: anytype) !u64 { var stream = std.io.countingWriter(std.io.null_writer); var writer = stream.writer(); var size: u64 = 0; - for (symbols) |symbol| { + for (symbols) |entry| { size += 2; - try leb.writeILEB128(writer, symbol.offset); + try leb.writeILEB128(writer, entry.value.offset); size += 1; } @@ -49,8 +39,9 @@ pub fn rebaseInfoSize(symbols: []*const ExternSymbol) !u64 { return size; } -pub fn writeRebaseInfo(symbols: []*const ExternSymbol, writer: anytype) !void { - for (symbols) |symbol| { +pub fn writeRebaseInfo(symbols: anytype, writer: anytype) !void { + for (symbols) |entry| { + const symbol = entry.value; try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER)); try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); try leb.writeILEB128(writer, symbol.offset); @@ -59,23 +50,23 @@ pub fn writeRebaseInfo(symbols: []*const ExternSymbol, writer: anytype) !void { try writer.writeByte(macho.REBASE_OPCODE_DONE); } -pub fn bindInfoSize(symbols: []*const ExternSymbol) !u64 { +pub fn bindInfoSize(symbols: anytype) !u64 { var stream = std.io.countingWriter(std.io.null_writer); var writer = stream.writer(); var size: u64 = 0; - for (symbols) |symbol| { + for (symbols) |entry| { + const symbol = entry.value; + size += 1; if (symbol.dylib_ordinal > 15) { try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); } size += 1; - if (symbol.name) |name| { - size += 1; - size += name.len; - size += 1; - } + size += 1; + size += entry.key.len; + size += 1; size += 1; try leb.writeILEB128(writer, symbol.offset); @@ -86,8 +77,10 @@ pub fn bindInfoSize(symbols: []*const ExternSymbol) !u64 { return size; } -pub fn writeBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { - for (symbols) |symbol| { +pub fn writeBindInfo(symbols: anytype, writer: anytype) !void { + for (symbols) |entry| { + const symbol = entry.value; + if (symbol.dylib_ordinal > 15) { try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); @@ -98,11 +91,9 @@ pub fn writeBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { } try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER)); - if (symbol.name) |name| { - try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. - try writer.writeAll(name); - try writer.writeByte(0); - } + try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. 
+ try writer.writeAll(entry.key); + try writer.writeByte(0); try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); try leb.writeILEB128(writer, symbol.offset); @@ -111,23 +102,24 @@ pub fn writeBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { } } -pub fn lazyBindInfoSize(symbols: []*const ExternSymbol) !u64 { +pub fn lazyBindInfoSize(symbols: anytype) !u64 { var stream = std.io.countingWriter(std.io.null_writer); var writer = stream.writer(); var size: u64 = 0; - for (symbols) |symbol| { + for (symbols) |entry| { + const symbol = entry.value; size += 1; try leb.writeILEB128(writer, symbol.offset); size += 1; if (symbol.dylib_ordinal > 15) { try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal)); } - if (symbol.name) |name| { - size += 1; - size += name.len; - size += 1; - } + + size += 1; + size += entry.key.len; + size += 1; + size += 2; } @@ -135,8 +127,9 @@ pub fn lazyBindInfoSize(symbols: []*const ExternSymbol) !u64 { return size; } -pub fn writeLazyBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void { - for (symbols) |symbol| { +pub fn writeLazyBindInfo(symbols: anytype, writer: anytype) !void { + for (symbols) |entry| { + const symbol = entry.value; try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment)); try leb.writeILEB128(writer, symbol.offset); @@ -149,11 +142,9 @@ pub fn writeLazyBindInfo(symbols: []*const ExternSymbol, writer: anytype) !void try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal))); } - if (symbol.name) |name| { - try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. - try writer.writeAll(name); - try writer.writeByte(0); - } + try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags. 
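+        // The symbol name follows inline, NUL-terminated; dyld reads it
+        // straight out of the opcode stream when the stub is first called.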
+ try writer.writeAll(entry.key); + try writer.writeByte(0); try writer.writeByte(macho.BIND_OPCODE_DO_BIND); try writer.writeByte(macho.BIND_OPCODE_DONE); -- cgit v1.2.3 From c6cb02c2265f181baaae4d57679baa8306431c11 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 17 Jan 2021 22:22:47 +0100 Subject: stage2 AArch64: fix stack offsets in genSetStack --- src/codegen.zig | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 1478ede6ff..df04a740b9 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2693,9 +2693,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (abi_size) { 1, 4 => { - const offset = if (adj_off <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + const offset = if (math.cast(u12, adj_off)) |imm| blk: { + break :blk Instruction.Offset.imm(imm); + } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); const str = switch (abi_size) { 1 => Instruction.strb, 4 => Instruction.str, @@ -2856,12 +2856,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (abi_size) { 4, 8 => { - const offset = if (adj_off <= math.maxInt(u12)) blk: { - break :blk Instruction.LoadStoreOffset.imm(@intCast(u12, adj_off)); - } else Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); - const rn: Register = switch (abi_size) { - 4 => .w29, - 8 => .x29, + const offset = if (math.cast(i9, adj_off)) |imm| + Instruction.LoadStoreOffset.imm_post_index(-imm) + else |_| + Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + const rn: Register = switch (arch) { + .aarch64, .aarch64_be => .x29, + .aarch64_32 => .w29, else => unreachable, }; -- cgit v1.2.3 From 458011f21fda961055a0fd7d280e33824c63c446 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 17 Jan 2021 23:09:08 +0100 Subject: stage2 AArch64: update function prologue and epilogue to include stack offsets --- src/codegen.zig | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index df04a740b9..443117b54d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -641,20 +641,33 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { // TODO Finish function prologue and epilogue for aarch64. - // Reserve the stack for local variables, etc. // stp fp, lr, [sp, #-16]! 
+ // mov fp, sp + // sub sp, sp, #reloc writeInt(u32, try self.code.addManyAsArray(4), Instruction.stp( .x29, .x30, Register.sp, Instruction.LoadStorePairOffset.pre_index(-16), ).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.x29, .xzr, 0, false).toU32()); + const backpatch_reloc = self.code.items.len; + try self.code.resize(backpatch_reloc + 4); try self.dbgSetPrologueEnd(); try self.genBody(self.mod_fn.body); + // Backpatch stack offset + const stack_end = self.max_end_stack; + const aligned_stack_end = mem.alignForward(stack_end, self.stack_align); + if (math.cast(u12, aligned_stack_end)) |size| { + writeInt(u32, self.code.items[backpatch_reloc..][0..4], Instruction.sub(.xzr, .xzr, size, false).toU32()); + } else |_| { + return self.failSymbol("TODO AArch64: allow larger stacks", .{}); + } + try self.dbgSetEpilogueBegin(); // exitlude jumps @@ -690,6 +703,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { Register.sp, Instruction.LoadStorePairOffset.post_index(16), ).toU32()); + // add sp, sp, #stack_size + writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.xzr, .xzr, @intCast(u12, aligned_stack_end), false).toU32()); // ret lr writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32()); } else { -- cgit v1.2.3 From 6c7e66613d57aec2f2949c065ea6431ff6c31f88 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Mon, 18 Jan 2021 21:51:56 +0100 Subject: stage2 AArch64: implement jump --- src/codegen.zig | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 9566856122..14572c2012 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2349,6 +2349,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(src, "TODO: enable larger branch offset", .{}); } }, + .aarch64, .aarch64_be, .aarch64_32 => { + if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| { + writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32()); + } else |err| { + return self.fail(src, "TODO: enable larger branch offset", .{}); + } + }, else => return self.fail(src, "TODO implement jump for {}", .{self.target.cpu.arch}), } } -- cgit v1.2.3 From ecc246efa2c133aaab73032a18fed5b2c15e08ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 18 Jan 2021 19:17:23 -0700 Subject: stage2: rework ZIR/TZIR for optionals and error unions * fix wrong pointer const-ness when unwrapping optionals * allow grouped expressions and orelse as lvalues * ZIR for unwrapping optionals: no redundant deref - add notes to please don't use rlWrapPtr, this function should be deleted * catch and orelse: better ZIR for non-lvalue: no redundant deref; operate entirely on values. lvalue case still works properly. - properly propagate the result location into the target expression * Test harness: better output when tests fail due to compile errors. * TZIR: add instruction variants. These allow fewer TZIR instructions to be emitted from zir_sema. See the commit diff for per-instruction documentation. - is_null - is_non_null - is_null_ptr - is_non_null_ptr - is_err - is_err_ptr - optional_payload - optional_payload_ptr * TZIR: removed old naming convention instructions: - isnonnull - isnull - iserr - unwrap_optional * ZIR: add instruction variants. These allow fewer ZIR instructions to be emitted from astgen. See the commit diff for per-instruction documentation. 
- is_non_null - is_null - is_non_null_ptr - is_null_ptr - is_err - is_err_ptr - optional_payload_safe - optional_payload_unsafe - optional_payload_safe_ptr - optional_payload_unsafe_ptr - err_union_payload_safe - err_union_payload_unsafe - err_union_payload_safe_ptr - err_union_payload_unsafe_ptr - err_union_code - err_union_code_ptr * ZIR: removed old naming convention instructions: - isnonnull - isnull - iserr - unwrap_optional_safe - unwrap_optional_unsafe - unwrap_err_safe - unwrap_err_unsafe - unwrap_err_code --- src/Module.zig | 2 +- src/astgen.zig | 124 ++++++++++++++++++++++++++++++++++++++------------ src/codegen.zig | 37 ++++++++++++--- src/ir.zig | 32 +++++++++---- src/test.zig | 5 ++- src/zir.zig | 130 ++++++++++++++++++++++++++++++++++++++--------------- src/zir_sema.zig | 135 ++++++++++++++++++++++++++++++++++++++++++++----------- 7 files changed, 358 insertions(+), 107 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Module.zig b/src/Module.zig index d75c1d2a0d..747e60f970 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2453,7 +2453,7 @@ pub fn analyzeIsNull( return self.constBool(scope, src, bool_value); } const b = try self.requireRuntimeBlock(scope, src); - const inst_tag: Inst.Tag = if (invert_logic) .isnonnull else .isnull; + const inst_tag: Inst.Tag = if (invert_logic) .is_non_null else .is_null; return self.addUnOp(b, src, Type.initTag(.bool), inst_tag, operand); } diff --git a/src/astgen.zig b/src/astgen.zig index 625ad967aa..f24d078b4b 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -118,7 +118,6 @@ fn lvalExpr(mod: *Module, scope: *Scope, node: *ast.Node) InnerError!*zir.Inst { .LabeledBlock, .Break, .PtrType, - .GroupedExpression, .ArrayType, .ArrayTypeSentinel, .EnumLiteral, @@ -129,7 +128,6 @@ fn lvalExpr(mod: *Module, scope: *Scope, node: *ast.Node) InnerError!*zir.Inst { .ErrorUnion, .MergeErrorSets, .Range, - .OrElse, .Await, .BitNot, .Negation, @@ -168,7 +166,14 @@ fn lvalExpr(mod: *Module, scope: *Scope, node: *ast.Node) InnerError!*zir.Inst { }, // can be assigned to - .UnwrapOptional, .Deref, .Period, .ArrayAccess, .Identifier => {}, + .UnwrapOptional, + .Deref, + .Period, + .ArrayAccess, + .Identifier, + .GroupedExpression, + .OrElse, + => {}, } return expr(mod, scope, .ref, node); } @@ -913,8 +918,12 @@ fn unwrapOptional(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Si const tree = scope.tree(); const src = tree.token_locs[node.rtoken].start; - const operand = try expr(mod, scope, .ref, node.lhs); - return rlWrapPtr(mod, scope, rl, try addZIRUnOp(mod, scope, src, .unwrap_optional_safe, operand)); + const operand = try expr(mod, scope, rl, node.lhs); + const op: zir.Inst.Tag = switch (rl) { + .ref => .optional_payload_safe_ptr, + else => .optional_payload_safe, + }; + return addZIRUnOp(mod, scope, src, op, operand); } fn containerField( @@ -1110,6 +1119,7 @@ fn errorSetDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Erro } // analyzing the error set results in a decl ref, so we might need to dereference it + // TODO remove all callsites to rlWrapPtr return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.ErrorSet, .{ .fields = fields }, .{})); } @@ -1123,11 +1133,61 @@ fn errorType(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!* } fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) InnerError!*zir.Inst { - return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .iserr, .unwrap_err_unsafe, node.rhs, node.payload); + switch (rl) { + 
.ref => return orelseCatchExpr( + mod, + scope, + rl, + node.lhs, + node.op_token, + .is_err_ptr, + .err_union_payload_unsafe_ptr, + .err_union_code_ptr, + node.rhs, + node.payload, + ), + else => return orelseCatchExpr( + mod, + scope, + rl, + node.lhs, + node.op_token, + .is_err, + .err_union_payload_unsafe, + .err_union_code, + node.rhs, + node.payload, + ), + } } fn orelseExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleInfixOp) InnerError!*zir.Inst { - return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .isnonnull, .unwrap_optional_unsafe, node.rhs, null); + switch (rl) { + .ref => return orelseCatchExpr( + mod, + scope, + rl, + node.lhs, + node.op_token, + .is_null_ptr, + .optional_payload_unsafe_ptr, + undefined, + node.rhs, + null, + ), + else => return orelseCatchExpr( + mod, + scope, + rl, + node.lhs, + node.op_token, + .is_null, + .optional_payload_unsafe, + undefined, + node.rhs, + null, + ), + } } fn orelseCatchExpr( @@ -1138,17 +1198,13 @@ fn orelseCatchExpr( op_token: ast.TokenIndex, cond_op: zir.Inst.Tag, unwrap_op: zir.Inst.Tag, + unwrap_code_op: zir.Inst.Tag, rhs: *ast.Node, payload_node: ?*ast.Node, ) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[op_token].start; - const operand_ptr = try expr(mod, scope, .ref, lhs); - // TODO we could avoid an unnecessary copy if .iserr, .isnull took a pointer - const err_union = try addZIRUnOp(mod, scope, src, .deref, operand_ptr); - const cond = try addZIRUnOp(mod, scope, src, cond_op, err_union); - var block_scope: Scope.GenZIR = .{ .parent = scope, .decl = scope.ownerDecl().?, @@ -1157,14 +1213,8 @@ fn orelseCatchExpr( }; defer block_scope.instructions.deinit(mod.gpa); - const condbr = try addZIRInstSpecial(mod, &block_scope.base, src, zir.Inst.CondBr, .{ - .condition = cond, - .then_body = undefined, // populated below - .else_body = undefined, // populated below - }, .{}); - const block = try addZIRInstBlock(mod, scope, src, .block, .{ - .instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items), + .instructions = undefined, // populated below }); // Most result location types can be forwarded directly; however @@ -1175,9 +1225,18 @@ fn orelseCatchExpr( .discard, .none, .ty, .ptr, .ref => rl, .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block }, }; + // This could be a pointer or value depending on the `rl` parameter. 
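+    // (With a .ref result location the whole chain stays in pointer space
+    // via the *_ptr instruction variants passed in as cond_op and
+    // unwrap_op; otherwise the value forms are used and no deref is
+    // emitted at all.)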
+ const operand = try expr(mod, &block_scope.base, branch_rl, lhs); + const cond = try addZIRUnOp(mod, &block_scope.base, src, cond_op, operand); + + const condbr = try addZIRInstSpecial(mod, &block_scope.base, src, zir.Inst.CondBr, .{ + .condition = cond, + .then_body = undefined, // populated below + .else_body = undefined, // populated below + }, .{}); var then_scope: Scope.GenZIR = .{ - .parent = scope, + .parent = &block_scope.base, .decl = block_scope.decl, .arena = block_scope.arena, .instructions = .{}, @@ -1193,38 +1252,41 @@ fn orelseCatchExpr( if (mem.eql(u8, err_name, "_")) break :blk &then_scope.base; - const unwrapped_err_ptr = try addZIRUnOp(mod, &then_scope.base, src, .unwrap_err_code, operand_ptr); err_val_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = err_name, - .inst = try addZIRUnOp(mod, &then_scope.base, src, .deref, unwrapped_err_ptr), + .inst = try addZIRUnOp(mod, &then_scope.base, src, unwrap_code_op, operand), }; break :blk &err_val_scope.base; }; _ = try addZIRInst(mod, &then_scope.base, src, zir.Inst.Break, .{ .block = block, - .operand = try rlWrap(mod, then_sub_scope, .{ .ref = {} }, try expr(mod, then_sub_scope, branch_rl, rhs)), + .operand = try expr(mod, then_sub_scope, branch_rl, rhs), }, .{}); var else_scope: Scope.GenZIR = .{ - .parent = scope, + .parent = &block_scope.base, .decl = block_scope.decl, .arena = block_scope.arena, .instructions = .{}, }; defer else_scope.instructions.deinit(mod.gpa); - const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand_ptr); + // This could be a pointer or value depending on `unwrap_op`. + const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand); _ = try addZIRInst(mod, &else_scope.base, src, zir.Inst.Break, .{ .block = block, .operand = unwrapped_payload, }, .{}); + // All branches have been generated, add the instructions to the block. + block.positionals.body.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items); + condbr.positionals.then_body = .{ .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items) }; condbr.positionals.else_body = .{ .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items) }; - return rlWrapPtr(mod, scope, rl, &block.base); + return &block.base; } /// Return whether the identifier names of two tokens are equal. 
Resolves @"" @@ -1253,6 +1315,7 @@ fn field(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleInfix const lhs = try expr(mod, scope, .ref, node.lhs); const field_name = try identifierStringInst(mod, scope, node.rhs.castTag(.Identifier).?); + // TODO remove all callsites to rlWrapPtr return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.FieldPtr, .{ .object_ptr = lhs, .field_name = field_name }, .{})); } @@ -1263,6 +1326,7 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array const array_ptr = try expr(mod, scope, .ref, node.lhs); const index = try expr(mod, scope, .none, node.index_expr); + // TODO remove all callsites to rlWrapPtr return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.ElemPtr, .{ .array_ptr = array_ptr, .index = index }, .{})); } @@ -1420,13 +1484,13 @@ const CondKind = union(enum) { const cond_ptr = try expr(mod, &block_scope.base, .ref, cond_node); self.* = .{ .optional = cond_ptr }; const result = try addZIRUnOp(mod, &block_scope.base, src, .deref, cond_ptr); - return try addZIRUnOp(mod, &block_scope.base, src, .isnonnull, result); + return try addZIRUnOp(mod, &block_scope.base, src, .is_non_null, result); }, .err_union => { const err_ptr = try expr(mod, &block_scope.base, .ref, cond_node); self.* = .{ .err_union = err_ptr }; const result = try addZIRUnOp(mod, &block_scope.base, src, .deref, err_ptr); - return try addZIRUnOp(mod, &block_scope.base, src, .iserr, result); + return try addZIRUnOp(mod, &block_scope.base, src, .is_err, result); }, } } @@ -1456,7 +1520,7 @@ const CondKind = union(enum) { fn elseSubScope(self: CondKind, mod: *Module, else_scope: *Scope.GenZIR, src: usize, payload_node: ?*ast.Node) !*Scope { if (self != .err_union) return &else_scope.base; - const payload_ptr = try addZIRUnOp(mod, &else_scope.base, src, .unwrap_err_unsafe, self.err_union.?); + const payload_ptr = try addZIRUnOp(mod, &else_scope.base, src, .err_union_payload_unsafe_ptr, self.err_union.?); const payload = payload_node.?.castTag(.Payload).?; const ident_node = payload.error_symbol.castTag(.Identifier).?; @@ -2264,6 +2328,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo .local_ptr => { const local_ptr = s.cast(Scope.LocalPtr).?; if (mem.eql(u8, local_ptr.name, ident_name)) { + // TODO remove all callsites to rlWrapPtr return rlWrapPtr(mod, scope, rl, local_ptr.ptr); } s = local_ptr.parent; @@ -3047,6 +3112,7 @@ fn rlWrapVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, resul /// TODO go over all the callsites and see where we can introduce "by-value" ZIR instructions /// to save ZIR memory. For example, see DeclVal vs DeclRef. +/// Do not add additional callsites to this function. 
fn rlWrapPtr(mod: *Module, scope: *Scope, rl: ResultLoc, ptr: *zir.Inst) InnerError!*zir.Inst { if (rl == .ref) return ptr; diff --git a/src/codegen.zig b/src/codegen.zig index 14572c2012..1ca2bb2abe 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -860,9 +860,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .isnonnull => return self.genIsNonNull(inst.castTag(.isnonnull).?), - .isnull => return self.genIsNull(inst.castTag(.isnull).?), - .iserr => return self.genIsErr(inst.castTag(.iserr).?), + .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + .is_null => return self.genIsNull(inst.castTag(.is_null).?), + .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + .is_err => return self.genIsErr(inst.castTag(.is_err).?), + .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), .load => return self.genLoad(inst.castTag(.load).?), .loop => return self.genLoop(inst.castTag(.loop).?), .not => return self.genNot(inst.castTag(.not).?), @@ -874,7 +877,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .sub => return self.genSub(inst.castTag(.sub).?), .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), .unreach => return MCValue{ .unreach = {} }, - .unwrap_optional => return self.genUnwrapOptional(inst.castTag(.unwrap_optional).?), + .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), .varptr => return self.genVarPtr(inst.castTag(.varptr).?), .xor => return self.genXor(inst.castTag(.xor).?), @@ -1118,12 +1122,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genOptionalPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue { // No side effects, so if it's unreferenced, do nothing. if (inst.base.isUnused()) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement unwrap optional for {}", .{self.target.cpu.arch}), + else => return self.fail(inst.base.src, "TODO implement .optional_payload for {}", .{self.target.cpu.arch}), + } + } + + fn genOptionalPayloadPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + // No side effects, so if it's unreferenced, do nothing. + if (inst.base.isUnused()) + return MCValue.dead; + switch (arch) { + else => return self.fail(inst.base.src, "TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}), } } @@ -2306,6 +2319,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + fn genIsNullPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + return self.fail(inst.base.src, "TODO load the operand and call genIsNull", .{}); + } + fn genIsNonNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue { // Here you can specialize this instruction if it makes sense to, otherwise the default // will call genIsNull and invert the result. 
@@ -2314,12 +2331,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + fn genIsNonNullPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + return self.fail(inst.base.src, "TODO load the operand and call genIsNonNull", .{}); + } + fn genIsErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { switch (arch) { else => return self.fail(inst.base.src, "TODO implement iserr for {}", .{self.target.cpu.arch}), } } + fn genIsErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + return self.fail(inst.base.src, "TODO load the operand and call genIsErr", .{}); + } + fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue { // A loop is a setup to be able to jump back to the beginning. const start_index = self.code.items.len; diff --git a/src/ir.zig b/src/ir.zig index e43397faba..89698bdd84 100644 --- a/src/ir.zig +++ b/src/ir.zig @@ -73,9 +73,18 @@ pub const Inst = struct { condbr, constant, dbg_stmt, - isnonnull, - isnull, - iserr, + // ?T => bool + is_null, + // ?T => bool (inverted logic) + is_non_null, + // *?T => bool + is_null_ptr, + // *?T => bool (inverted logic) + is_non_null_ptr, + // E!T => bool + is_err, + // *E!T => bool + is_err_ptr, booland, boolor, /// Read a value from a pointer. @@ -93,7 +102,10 @@ pub const Inst = struct { not, floatcast, intcast, - unwrap_optional, + // ?T => T + optional_payload, + // *?T => *T + optional_payload_ptr, wrap_optional, xor, switchbr, @@ -111,14 +123,18 @@ pub const Inst = struct { .ret, .bitcast, .not, - .isnonnull, - .isnull, - .iserr, + .is_non_null, + .is_non_null_ptr, + .is_null, + .is_null_ptr, + .is_err, + .is_err_ptr, .ptrtoint, .floatcast, .intcast, .load, - .unwrap_optional, + .optional_payload, + .optional_payload_ptr, .wrap_optional, => UnOp, diff --git a/src/test.zig b/src/test.zig index 1c9fb57f01..150b6496c1 100644 --- a/src/test.zig +++ b/src/test.zig @@ -696,7 +696,10 @@ pub const TestContext = struct { var all_errors = try comp.getAllErrorsAlloc(); defer all_errors.deinit(allocator); if (all_errors.list.len != 0) { - std.debug.print("\nErrors occurred updating the compilation:\n{s}\n", .{hr}); + std.debug.print( + "\nCase '{s}': unexpected errors at update_index={d}:\n{s}\n", + .{ case.name, update_index, hr }, + ); for (all_errors.list) |err_msg| { switch (err_msg) { .src => |src| { diff --git a/src/zir.zig b/src/zir.zig index 0e7b3a3520..be45538288 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -174,11 +174,17 @@ pub const Inst = struct { /// Make an integer type out of signedness and bit count. inttype, /// Return a boolean false if an optional is null. `x != null` - isnonnull, + is_non_null, /// Return a boolean true if an optional is null. `x == null` - isnull, + is_null, + /// Return a boolean false if an optional is null. `x.* != null` + is_non_null_ptr, + /// Return a boolean true if an optional is null. `x.* == null` + is_null_ptr, /// Return a boolean true if value is an error - iserr, + is_err, + /// Return a boolean true if dereferenced pointer is an error + is_err_ptr, /// A labeled block of code that loops forever. At the end of the body it is implied /// to repeat; no explicit "repeat" instruction terminates loop bodies. loop, @@ -278,16 +284,42 @@ pub const Inst = struct { optional_type, /// Create a union type. union_type, - /// Unwraps an optional value 'lhs.?' - unwrap_optional_safe, - /// Same as previous, but without safety checks. Used for orelse, if and while - unwrap_optional_unsafe, - /// Gets the payload of an error union - unwrap_err_safe, - /// Same as previous, but without safety checks. 
Used for orelse, if and while - unwrap_err_unsafe, - /// Gets the error code value of an error union - unwrap_err_code, + /// ?T => T with safety. + /// Given an optional value, returns the payload value, with a safety check that + /// the value is non-null. Used for `orelse`, `if` and `while`. + optional_payload_safe, + /// ?T => T without safety. + /// Given an optional value, returns the payload value. No safety checks. + optional_payload_unsafe, + /// *?T => *T with safety. + /// Given a pointer to an optional value, returns a pointer to the payload value, + /// with a safety check that the value is non-null. Used for `orelse`, `if` and `while`. + optional_payload_safe_ptr, + /// *?T => *T without safety. + /// Given a pointer to an optional value, returns a pointer to the payload value. + /// No safety checks. + optional_payload_unsafe_ptr, + /// E!T => T with safety. + /// Given an error union value, returns the payload value, with a safety check + /// that the value is not an error. Used for catch, if, and while. + err_union_payload_safe, + /// E!T => T without safety. + /// Given an error union value, returns the payload value. No safety checks. + err_union_payload_unsafe, + /// *E!T => *T with safety. + /// Given a pointer to an error union value, returns a pointer to the payload value, + /// with a safety check that the value is not an error. Used for catch, if, and while. + err_union_payload_safe_ptr, + /// *E!T => *T without safety. + /// Given a pointer to a error union value, returns a pointer to the payload value. + /// No safety checks. + err_union_payload_unsafe_ptr, + /// E!T => E without safety. + /// Given an error union value, returns the error code. No safety checks. + err_union_code, + /// *E!T => E without safety. + /// Given a pointer to an error union value, returns the error code. No safety checks. 
+ err_union_code_ptr, /// Takes a *E!T and raises a compiler error if T != void ensure_err_payload_void, /// Create a enum literal, @@ -320,9 +352,12 @@ pub const Inst = struct { .compileerror, .deref, .@"return", - .isnull, - .isnonnull, - .iserr, + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_err_ptr, .ptrtoint, .ensure_result_used, .ensure_result_non_error, @@ -341,11 +376,16 @@ pub const Inst = struct { .mut_slice_type, .const_slice_type, .optional_type, - .unwrap_optional_safe, - .unwrap_optional_unsafe, - .unwrap_err_safe, - .unwrap_err_unsafe, - .unwrap_err_code, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_safe, + .err_union_payload_unsafe, + .err_union_payload_safe_ptr, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, .ensure_err_payload_void, .anyframe_type, .bitnot, @@ -495,9 +535,12 @@ pub const Inst = struct { .int, .intcast, .inttype, - .isnonnull, - .isnull, - .iserr, + .is_non_null, + .is_null, + .is_non_null_ptr, + .is_null_ptr, + .is_err, + .is_err_ptr, .mod_rem, .mul, .mulwrap, @@ -525,11 +568,16 @@ pub const Inst = struct { .typeof, .xor, .optional_type, - .unwrap_optional_safe, - .unwrap_optional_unsafe, - .unwrap_err_safe, - .unwrap_err_unsafe, - .unwrap_err_code, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_safe, + .err_union_payload_unsafe, + .err_union_payload_safe_ptr, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, .ptr_type, .ensure_err_payload_void, .enum_literal, @@ -1540,14 +1588,18 @@ const DumpTzir = struct { .ret, .bitcast, .not, - .isnonnull, - .isnull, - .iserr, + .is_non_null, + .is_non_null_ptr, + .is_null, + .is_null_ptr, + .is_err, + .is_err_ptr, .ptrtoint, .floatcast, .intcast, .load, - .unwrap_optional, + .optional_payload, + .optional_payload_ptr, .wrap_optional, => { const un_op = inst.cast(ir.Inst.UnOp).?; @@ -1637,14 +1689,18 @@ const DumpTzir = struct { .ret, .bitcast, .not, - .isnonnull, - .isnull, - .iserr, + .is_non_null, + .is_null, + .is_non_null_ptr, + .is_null_ptr, + .is_err, + .is_err_ptr, .ptrtoint, .floatcast, .intcast, .load, - .unwrap_optional, + .optional_payload, + .optional_payload_ptr, .wrap_optional, => { const un_op = inst.cast(ir.Inst.UnOp).?; diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 36eb5f4239..82772cac16 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -127,18 +127,26 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! 
.cmp_gt => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_gt).?, .gt), .cmp_neq => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_neq).?, .neq), .condbr => return analyzeInstCondBr(mod, scope, old_inst.castTag(.condbr).?), - .isnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnull).?, true), - .isnonnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnonnull).?, false), - .iserr => return analyzeInstIsErr(mod, scope, old_inst.castTag(.iserr).?), + .is_null => return isNull(mod, scope, old_inst.castTag(.is_null).?, false), + .is_non_null => return isNull(mod, scope, old_inst.castTag(.is_non_null).?, true), + .is_null_ptr => return isNullPtr(mod, scope, old_inst.castTag(.is_null_ptr).?, false), + .is_non_null_ptr => return isNullPtr(mod, scope, old_inst.castTag(.is_non_null_ptr).?, true), + .is_err => return isErr(mod, scope, old_inst.castTag(.is_err).?), + .is_err_ptr => return isErrPtr(mod, scope, old_inst.castTag(.is_err_ptr).?), .boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?), .typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?), .typeof_peer => return analyzeInstTypeOfPeer(mod, scope, old_inst.castTag(.typeof_peer).?), .optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?), - .unwrap_optional_safe => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional_safe).?, true), - .unwrap_optional_unsafe => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional_unsafe).?, false), - .unwrap_err_safe => return analyzeInstUnwrapErr(mod, scope, old_inst.castTag(.unwrap_err_safe).?, true), - .unwrap_err_unsafe => return analyzeInstUnwrapErr(mod, scope, old_inst.castTag(.unwrap_err_unsafe).?, false), - .unwrap_err_code => return analyzeInstUnwrapErrCode(mod, scope, old_inst.castTag(.unwrap_err_code).?), + .optional_payload_safe => return optionalPayload(mod, scope, old_inst.castTag(.optional_payload_safe).?, true), + .optional_payload_unsafe => return optionalPayload(mod, scope, old_inst.castTag(.optional_payload_unsafe).?, false), + .optional_payload_safe_ptr => return optionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_safe_ptr).?, true), + .optional_payload_unsafe_ptr => return optionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_unsafe_ptr).?, false), + .err_union_payload_safe => return errorUnionPayload(mod, scope, old_inst.castTag(.err_union_payload_safe).?, true), + .err_union_payload_unsafe => return errorUnionPayload(mod, scope, old_inst.castTag(.err_union_payload_unsafe).?, false), + .err_union_payload_safe_ptr => return errorUnionPayloadPtr(mod, scope, old_inst.castTag(.err_union_payload_safe_ptr).?, true), + .err_union_payload_unsafe_ptr => return errorUnionPayloadPtr(mod, scope, old_inst.castTag(.err_union_payload_unsafe_ptr).?, false), + .err_union_code => return errorUnionCode(mod, scope, old_inst.castTag(.err_union_code).?), + .err_union_code_ptr => return errorUnionCodePtr(mod, scope, old_inst.castTag(.err_union_code_ptr).?), .ensure_err_payload_void => return analyzeInstEnsureErrPayloadVoid(mod, scope, old_inst.castTag(.ensure_err_payload_void).?), .array_type => return analyzeInstArrayType(mod, scope, old_inst.castTag(.array_type).?), .array_type_sentinel => return analyzeInstArrayTypeSentinel(mod, scope, old_inst.castTag(.array_type_sentinel).?), @@ -1104,48 +1112,109 @@ fn analyzeInstEnumLiteral(mod: *Module, scope: *Scope, inst: *zir.Inst.EnumLiter }); } -fn 
analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { +/// Pointer in, pointer out. +fn optionalPayloadPtr( + mod: *Module, + scope: *Scope, + unwrap: *zir.Inst.UnOp, + safety_check: bool, +) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - const operand = try resolveInst(mod, scope, unwrap.positionals.operand); - assert(operand.ty.zigTypeTag() == .Pointer); - const elem_type = operand.ty.elemType(); - if (elem_type.zigTypeTag() != .Optional) { - return mod.fail(scope, unwrap.base.src, "expected optional type, found {}", .{elem_type}); + const optional_ptr = try resolveInst(mod, scope, unwrap.positionals.operand); + assert(optional_ptr.ty.zigTypeTag() == .Pointer); + + const opt_type = optional_ptr.ty.elemType(); + if (opt_type.zigTypeTag() != .Optional) { + return mod.fail(scope, unwrap.base.src, "expected optional type, found {}", .{opt_type}); } - const child_type = try elem_type.optionalChildAlloc(scope.arena()); - const child_pointer = try mod.simplePtrType(scope, unwrap.base.src, child_type, operand.ty.isConstPtr(), .One); + const child_type = try opt_type.optionalChildAlloc(scope.arena()); + const child_pointer = try mod.simplePtrType(scope, unwrap.base.src, child_type, !optional_ptr.ty.isConstPtr(), .One); - if (operand.value()) |val| { + if (optional_ptr.value()) |pointer_val| { + const val = try pointer_val.pointerDeref(scope.arena()); if (val.isNull()) { return mod.fail(scope, unwrap.base.src, "unable to unwrap null", .{}); } + // The same Value represents the pointer to the optional and the payload. return mod.constInst(scope, unwrap.base.src, .{ .ty = child_pointer, + .val = pointer_val, + }); + } + + const b = try mod.requireRuntimeBlock(scope, unwrap.base.src); + if (safety_check and mod.wantSafety(scope)) { + const is_non_null = try mod.addUnOp(b, unwrap.base.src, Type.initTag(.bool), .is_non_null_ptr, optional_ptr); + try mod.addSafetyCheck(b, is_non_null, .unwrap_null); + } + return mod.addUnOp(b, unwrap.base.src, child_pointer, .optional_payload_ptr, optional_ptr); +} + +/// Value in, value out. 
+fn optionalPayload( + mod: *Module, + scope: *Scope, + unwrap: *zir.Inst.UnOp, + safety_check: bool, +) InnerError!*Inst { + const tracy = trace(@src()); + defer tracy.end(); + + const operand = try resolveInst(mod, scope, unwrap.positionals.operand); + const opt_type = operand.ty; + if (opt_type.zigTypeTag() != .Optional) { + return mod.fail(scope, unwrap.base.src, "expected optional type, found {}", .{opt_type}); + } + + const child_type = try opt_type.optionalChildAlloc(scope.arena()); + + if (operand.value()) |val| { + if (val.isNull()) { + return mod.fail(scope, unwrap.base.src, "unable to unwrap null", .{}); + } + return mod.constInst(scope, unwrap.base.src, .{ + .ty = child_type, .val = val, }); } const b = try mod.requireRuntimeBlock(scope, unwrap.base.src); if (safety_check and mod.wantSafety(scope)) { - const is_non_null = try mod.addUnOp(b, unwrap.base.src, Type.initTag(.bool), .isnonnull, operand); + const is_non_null = try mod.addUnOp(b, unwrap.base.src, Type.initTag(.bool), .is_non_null, operand); try mod.addSafetyCheck(b, is_non_null, .unwrap_null); } - return mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional, operand); + return mod.addUnOp(b, unwrap.base.src, child_type, .optional_payload, operand); } -fn analyzeInstUnwrapErr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { +/// Value in, value out +fn errorUnionPayload(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement analyzeInstUnwrapErr", .{}); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionPayload", .{}); } -fn analyzeInstUnwrapErrCode(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { +/// Pointer in, pointer out +fn errorUnionPayloadPtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement analyzeInstUnwrapErrCode", .{}); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionPayloadPtr", .{}); +} + +/// Value in, value out +fn errorUnionCode(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { + const tracy = trace(@src()); + defer tracy.end(); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionCode", .{}); +} + +/// Pointer in, value out +fn errorUnionCodePtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { + const tracy = trace(@src()); + defer tracy.end(); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionCodePtr", .{}); } fn analyzeInstEnsureErrPayloadVoid(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { @@ -2074,20 +2143,36 @@ fn analyzeInstBoolOp(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerEr return mod.addBinOp(b, inst.base.src, bool_type, if (is_bool_or) .boolor else .booland, lhs, rhs); } -fn analyzeInstIsNonNull(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { +fn isNull(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); return mod.analyzeIsNull(scope, inst.base.src, operand, invert_logic); } -fn analyzeInstIsErr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) 
InnerError!*Inst { +fn isNullPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { + const tracy = trace(@src()); + defer tracy.end(); + const ptr = try resolveInst(mod, scope, inst.positionals.operand); + const loaded = try mod.analyzeDeref(scope, inst.base.src, ptr, ptr.src); + return mod.analyzeIsNull(scope, inst.base.src, loaded, invert_logic); +} + +fn isErr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); return mod.analyzeIsErr(scope, inst.base.src, operand); } +fn isErrPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { + const tracy = trace(@src()); + defer tracy.end(); + const ptr = try resolveInst(mod, scope, inst.positionals.operand); + const loaded = try mod.analyzeDeref(scope, inst.base.src, ptr, ptr.src); + return mod.analyzeIsErr(scope, inst.base.src, loaded); +} + fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); -- cgit v1.2.3 From b7452fe35f514d4c04aae4582bc8071bc9e70f1b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 20 Jan 2021 20:37:44 -0700 Subject: stage2: rework astgen result locations Motivating test case: ```zig export fn _start() noreturn { var x: u64 = 1; var y: u32 = 2; var thing: u32 = 1; const result = if (thing == 1) x else y; exit(); } ``` The main idea here is for astgen to output ideal ZIR depending on whether or not the sub-expressions of a block consume the result location. Here, neither `x` nor `y` consume the result location of the conditional expression block, and so the ZIR should communicate the result of the condbr using break instructions, not with the result location pointer. With this commit, this is accomplished: ``` %22 = alloc_inferred() %23 = block({ %24 = const(TypedValue{ .ty = type, .val = bool}) %25 = deref(%18) %26 = const(TypedValue{ .ty = comptime_int, .val = 1}) %27 = cmp_eq(%25, %26) %28 = as(%24, %27) %29 = condbr(%28, { %30 = deref(%4) < there is no longer a store instruction here > %31 = break("label_23", %30) }, { %32 = deref(%11) < there is no longer a store instruction here > %33 = break("label_23", %32) }) }) %34 = store_to_inferred_ptr(%22, %23) <-- the store is only here %35 = resolve_inferred_alloc(%22) ``` However if the result location gets consumed, the break instructions change to break_void, and the result value is communicated only by the stores, not by the break instructions. Implementation: * The GenZIR scope that conditional branches uses now has an optional result location pointer field and a count of how many times the result location ended up being an rvalue (not consumed). * When rvalue() is called on a result location for a block, it increments this counter. After generating the branches of a block, astgen for the conditional branch checks this count and if it is 2 then the store_to_block_ptr instructions are elided and it calls rvalue() using the block result (which will account for peer type resolution on the break operands). astgen has many functions disabled until they can be reworked with these new semantics. That will be done before merging the branch. There are some new rules for astgen to follow regarding result locations and what you are allowed/required to do depending on which one is passed to expr(). See the updated doc comments of ResultLoc for details. 
I also changed naming conventions of stuff in this commit, sorry about that. --- src/Module.zig | 9 +- src/astgen.zig | 402 ++++++++++++++++++++++++-------------- src/codegen.zig | 24 +-- src/ir.zig | 16 +- src/zir.zig | 155 +++++++-------- src/zir_sema.zig | 572 ++++++++++++++++++++++++++++--------------------------- 6 files changed, 649 insertions(+), 529 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Module.zig b/src/Module.zig index fa9722814e..2dc84a93a9 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -697,6 +697,13 @@ pub const Scope = struct { continue_block: ?*zir.Inst.Block = null, /// only valid if label != null or (continue_block and break_block) != null break_result_loc: astgen.ResultLoc = undefined, + /// When a block has a pointer result location, here it is. + rl_ptr: ?*zir.Inst = null, + /// Keeps track of how many branches of a block did not actually + /// consume the result location. astgen uses this to figure out + /// whether to rely on break instructions or writing to the result + /// pointer for the result instruction. + rvalue_rl_count: usize = 0, pub const Label = struct { token: ast.TokenIndex, @@ -1171,7 +1178,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn()) { const src = tree.token_locs[body_block.rbrace].start; - _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid); + _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .return_void); } if (std.builtin.mode == .Debug and self.comp.verbose_ir) { diff --git a/src/astgen.zig b/src/astgen.zig index a74b83de44..617937aa82 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -14,25 +14,30 @@ const InnerError = Module.InnerError; pub const ResultLoc = union(enum) { /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the - /// expression should be generated. + /// expression should be generated. The result instruction from the expression must + /// be ignored. discard, /// The expression has an inferred type, and it will be evaluated as an rvalue. none, /// The expression must generate a pointer rather than a value. For example, the left hand side /// of an assignment uses this kind of result location. ref, - /// The expression will be type coerced into this type, but it will be evaluated as an rvalue. + /// The expression will be coerced into this type, but it will be evaluated as an rvalue. ty: *zir.Inst, - /// The expression must store its result into this typed pointer. + /// The expression must store its result into this typed pointer. The result instruction + /// from the expression must be ignored. ptr: *zir.Inst, /// The expression must store its result into this allocation, which has an inferred type. + /// The result instruction from the expression must be ignored. inferred_ptr: *zir.Inst.Tag.alloc_inferred.Type(), /// The expression must store its result into this pointer, which is a typed pointer that /// has been bitcasted to whatever the expression's type is. + /// The result instruction from the expression must be ignored. bitcasted_ptr: *zir.Inst.UnOp, /// There is a pointer for the expression to store its result into, however, its type /// is inferred based on peer type resolution for a `zir.Inst.Block`. - block_ptr: *zir.Inst.Block, + /// The result instruction from the expression must be ignored. 
+ block_ptr: *Module.Scope.GenZIR, }; pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*zir.Inst { @@ -179,6 +184,9 @@ fn lvalExpr(mod: *Module, scope: *Scope, node: *ast.Node) InnerError!*zir.Inst { } /// Turn Zig AST into untyped ZIR istructions. +/// When `rl` is discard, ptr, inferred_ptr, bitcasted_ptr, or inferred_ptr, the +/// result instruction can be used to inspect whether it is isNoReturn() but that is it, +/// it must otherwise not be used. pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerError!*zir.Inst { switch (node.tag) { .Root => unreachable, // Top-level declaration. @@ -197,20 +205,20 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .FieldInitializer => unreachable, // Handled explicitly. .ContainerField => unreachable, // Handled explicitly. - .Assign => return rlWrapVoid(mod, scope, rl, node, try assign(mod, scope, node.castTag(.Assign).?)), - .AssignBitAnd => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitAnd).?, .bitand)), - .AssignBitOr => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitOr).?, .bitor)), - .AssignBitShiftLeft => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftLeft).?, .shl)), - .AssignBitShiftRight => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftRight).?, .shr)), - .AssignBitXor => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitXor).?, .xor)), - .AssignDiv => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignDiv).?, .div)), - .AssignSub => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSub).?, .sub)), - .AssignSubWrap => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSubWrap).?, .subwrap)), - .AssignMod => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMod).?, .mod_rem)), - .AssignAdd => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAdd).?, .add)), - .AssignAddWrap => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAddWrap).?, .addwrap)), - .AssignMul => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMul).?, .mul)), - .AssignMulWrap => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMulWrap).?, .mulwrap)), + .Assign => return rvalueVoid(mod, scope, rl, node, try assign(mod, scope, node.castTag(.Assign).?)), + .AssignBitAnd => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitAnd).?, .bit_and)), + .AssignBitOr => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitOr).?, .bit_or)), + .AssignBitShiftLeft => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftLeft).?, .shl)), + .AssignBitShiftRight => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftRight).?, .shr)), + .AssignBitXor => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitXor).?, .xor)), + .AssignDiv => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignDiv).?, .div)), + .AssignSub => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, 
node.castTag(.AssignSub).?, .sub)), + .AssignSubWrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSubWrap).?, .subwrap)), + .AssignMod => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMod).?, .mod_rem)), + .AssignAdd => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAdd).?, .add)), + .AssignAddWrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAddWrap).?, .addwrap)), + .AssignMul => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMul).?, .mul)), + .AssignMulWrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMulWrap).?, .mulwrap)), .Add => return simpleBinOp(mod, scope, rl, node.castTag(.Add).?, .add), .AddWrap => return simpleBinOp(mod, scope, rl, node.castTag(.AddWrap).?, .addwrap), @@ -220,8 +228,8 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .MulWrap => return simpleBinOp(mod, scope, rl, node.castTag(.MulWrap).?, .mulwrap), .Div => return simpleBinOp(mod, scope, rl, node.castTag(.Div).?, .div), .Mod => return simpleBinOp(mod, scope, rl, node.castTag(.Mod).?, .mod_rem), - .BitAnd => return simpleBinOp(mod, scope, rl, node.castTag(.BitAnd).?, .bitand), - .BitOr => return simpleBinOp(mod, scope, rl, node.castTag(.BitOr).?, .bitor), + .BitAnd => return simpleBinOp(mod, scope, rl, node.castTag(.BitAnd).?, .bit_and), + .BitOr => return simpleBinOp(mod, scope, rl, node.castTag(.BitOr).?, .bit_or), .BitShiftLeft => return simpleBinOp(mod, scope, rl, node.castTag(.BitShiftLeft).?, .shl), .BitShiftRight => return simpleBinOp(mod, scope, rl, node.castTag(.BitShiftRight).?, .shr), .BitXor => return simpleBinOp(mod, scope, rl, node.castTag(.BitXor).?, .xor), @@ -239,15 +247,15 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .BoolAnd => return boolBinOp(mod, scope, rl, node.castTag(.BoolAnd).?), .BoolOr => return boolBinOp(mod, scope, rl, node.castTag(.BoolOr).?), - .BoolNot => return rlWrap(mod, scope, rl, try boolNot(mod, scope, node.castTag(.BoolNot).?)), - .BitNot => return rlWrap(mod, scope, rl, try bitNot(mod, scope, node.castTag(.BitNot).?)), - .Negation => return rlWrap(mod, scope, rl, try negation(mod, scope, node.castTag(.Negation).?, .sub)), - .NegationWrap => return rlWrap(mod, scope, rl, try negation(mod, scope, node.castTag(.NegationWrap).?, .subwrap)), + .BoolNot => return rvalue(mod, scope, rl, try boolNot(mod, scope, node.castTag(.BoolNot).?)), + .BitNot => return rvalue(mod, scope, rl, try bitNot(mod, scope, node.castTag(.BitNot).?)), + .Negation => return rvalue(mod, scope, rl, try negation(mod, scope, node.castTag(.Negation).?, .sub)), + .NegationWrap => return rvalue(mod, scope, rl, try negation(mod, scope, node.castTag(.NegationWrap).?, .subwrap)), .Identifier => return try identifier(mod, scope, rl, node.castTag(.Identifier).?), - .Asm => return rlWrap(mod, scope, rl, try assembly(mod, scope, node.castTag(.Asm).?)), - .StringLiteral => return rlWrap(mod, scope, rl, try stringLiteral(mod, scope, node.castTag(.StringLiteral).?)), - .IntegerLiteral => return rlWrap(mod, scope, rl, try integerLiteral(mod, scope, node.castTag(.IntegerLiteral).?)), + .Asm => return rvalue(mod, scope, rl, try assembly(mod, scope, node.castTag(.Asm).?)), + .StringLiteral => return rvalue(mod, scope, rl, try stringLiteral(mod, scope, node.castTag(.StringLiteral).?)), + .IntegerLiteral => 
return rvalue(mod, scope, rl, try integerLiteral(mod, scope, node.castTag(.IntegerLiteral).?)), .BuiltinCall => return builtinCall(mod, scope, rl, node.castTag(.BuiltinCall).?), .Call => return callExpr(mod, scope, rl, node.castTag(.Call).?), .Unreachable => return unreach(mod, scope, node.castTag(.Unreachable).?), @@ -255,34 +263,34 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .If => return ifExpr(mod, scope, rl, node.castTag(.If).?), .While => return whileExpr(mod, scope, rl, node.castTag(.While).?), .Period => return field(mod, scope, rl, node.castTag(.Period).?), - .Deref => return rlWrap(mod, scope, rl, try deref(mod, scope, node.castTag(.Deref).?)), - .AddressOf => return rlWrap(mod, scope, rl, try addressOf(mod, scope, node.castTag(.AddressOf).?)), - .FloatLiteral => return rlWrap(mod, scope, rl, try floatLiteral(mod, scope, node.castTag(.FloatLiteral).?)), - .UndefinedLiteral => return rlWrap(mod, scope, rl, try undefLiteral(mod, scope, node.castTag(.UndefinedLiteral).?)), - .BoolLiteral => return rlWrap(mod, scope, rl, try boolLiteral(mod, scope, node.castTag(.BoolLiteral).?)), - .NullLiteral => return rlWrap(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)), - .OptionalType => return rlWrap(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)), + .Deref => return rvalue(mod, scope, rl, try deref(mod, scope, node.castTag(.Deref).?)), + .AddressOf => return rvalue(mod, scope, rl, try addressOf(mod, scope, node.castTag(.AddressOf).?)), + .FloatLiteral => return rvalue(mod, scope, rl, try floatLiteral(mod, scope, node.castTag(.FloatLiteral).?)), + .UndefinedLiteral => return rvalue(mod, scope, rl, try undefLiteral(mod, scope, node.castTag(.UndefinedLiteral).?)), + .BoolLiteral => return rvalue(mod, scope, rl, try boolLiteral(mod, scope, node.castTag(.BoolLiteral).?)), + .NullLiteral => return rvalue(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)), + .OptionalType => return rvalue(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)), .UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?), - .Block => return rlWrapVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)), + .Block => return rvalueVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)), .LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?, .block), - .Break => return rlWrap(mod, scope, rl, try breakExpr(mod, scope, node.castTag(.Break).?)), - .Continue => return rlWrap(mod, scope, rl, try continueExpr(mod, scope, node.castTag(.Continue).?)), - .PtrType => return rlWrap(mod, scope, rl, try ptrType(mod, scope, node.castTag(.PtrType).?)), + .Break => return rvalue(mod, scope, rl, try breakExpr(mod, scope, node.castTag(.Break).?)), + .Continue => return rvalue(mod, scope, rl, try continueExpr(mod, scope, node.castTag(.Continue).?)), + .PtrType => return rvalue(mod, scope, rl, try ptrType(mod, scope, node.castTag(.PtrType).?)), .GroupedExpression => return expr(mod, scope, rl, node.castTag(.GroupedExpression).?.expr), - .ArrayType => return rlWrap(mod, scope, rl, try arrayType(mod, scope, node.castTag(.ArrayType).?)), - .ArrayTypeSentinel => return rlWrap(mod, scope, rl, try arrayTypeSentinel(mod, scope, node.castTag(.ArrayTypeSentinel).?)), - .EnumLiteral => return rlWrap(mod, scope, rl, try enumLiteral(mod, scope, node.castTag(.EnumLiteral).?)), - .MultilineStringLiteral => 
return rlWrap(mod, scope, rl, try multilineStrLiteral(mod, scope, node.castTag(.MultilineStringLiteral).?)), - .CharLiteral => return rlWrap(mod, scope, rl, try charLiteral(mod, scope, node.castTag(.CharLiteral).?)), - .SliceType => return rlWrap(mod, scope, rl, try sliceType(mod, scope, node.castTag(.SliceType).?)), - .ErrorUnion => return rlWrap(mod, scope, rl, try typeInixOp(mod, scope, node.castTag(.ErrorUnion).?, .error_union_type)), - .MergeErrorSets => return rlWrap(mod, scope, rl, try typeInixOp(mod, scope, node.castTag(.MergeErrorSets).?, .merge_error_sets)), - .AnyFrameType => return rlWrap(mod, scope, rl, try anyFrameType(mod, scope, node.castTag(.AnyFrameType).?)), - .ErrorSetDecl => return rlWrap(mod, scope, rl, try errorSetDecl(mod, scope, node.castTag(.ErrorSetDecl).?)), - .ErrorType => return rlWrap(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)), + .ArrayType => return rvalue(mod, scope, rl, try arrayType(mod, scope, node.castTag(.ArrayType).?)), + .ArrayTypeSentinel => return rvalue(mod, scope, rl, try arrayTypeSentinel(mod, scope, node.castTag(.ArrayTypeSentinel).?)), + .EnumLiteral => return rvalue(mod, scope, rl, try enumLiteral(mod, scope, node.castTag(.EnumLiteral).?)), + .MultilineStringLiteral => return rvalue(mod, scope, rl, try multilineStrLiteral(mod, scope, node.castTag(.MultilineStringLiteral).?)), + .CharLiteral => return rvalue(mod, scope, rl, try charLiteral(mod, scope, node.castTag(.CharLiteral).?)), + .SliceType => return rvalue(mod, scope, rl, try sliceType(mod, scope, node.castTag(.SliceType).?)), + .ErrorUnion => return rvalue(mod, scope, rl, try typeInixOp(mod, scope, node.castTag(.ErrorUnion).?, .error_union_type)), + .MergeErrorSets => return rvalue(mod, scope, rl, try typeInixOp(mod, scope, node.castTag(.MergeErrorSets).?, .merge_error_sets)), + .AnyFrameType => return rvalue(mod, scope, rl, try anyFrameType(mod, scope, node.castTag(.AnyFrameType).?)), + .ErrorSetDecl => return rvalue(mod, scope, rl, try errorSetDecl(mod, scope, node.castTag(.ErrorSetDecl).?)), + .ErrorType => return rvalue(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)), .For => return forExpr(mod, scope, rl, node.castTag(.For).?), .ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?), - .Slice => return rlWrap(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)), + .Slice => return rvalue(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)), .Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?), .Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?), .OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?), @@ -341,6 +349,9 @@ pub fn comptimeExpr(mod: *Module, parent_scope: *Scope, rl: ResultLoc, node: *as } fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst { + if (true) { + @panic("TODO reimplement this"); + } const tree = parent_scope.tree(); const src = tree.token_locs[node.ltoken].start; @@ -563,8 +574,8 @@ fn blockExprStmts(mod: *Module, parent_scope: *Scope, node: *ast.Node, statement scope = try varDecl(mod, scope, var_decl_node, &block_arena.allocator); }, .Assign => try assign(mod, scope, statement.castTag(.Assign).?), - .AssignBitAnd => try assignOp(mod, scope, statement.castTag(.AssignBitAnd).?, .bitand), - .AssignBitOr => try assignOp(mod, scope, statement.castTag(.AssignBitOr).?, .bitor), + .AssignBitAnd => try assignOp(mod, scope, 
statement.castTag(.AssignBitAnd).?, .bit_and), + .AssignBitOr => try assignOp(mod, scope, statement.castTag(.AssignBitOr).?, .bit_or), .AssignBitShiftLeft => try assignOp(mod, scope, statement.castTag(.AssignBitShiftLeft).?, .shl), .AssignBitShiftRight => try assignOp(mod, scope, statement.castTag(.AssignBitShiftRight).?, .shr), .AssignBitXor => try assignOp(mod, scope, statement.castTag(.AssignBitXor).?, .xor), @@ -644,6 +655,7 @@ fn varDecl( // Namespace vars shadowing detection if (mod.lookupDeclName(scope, ident_name)) |_| { + // TODO add note for other definition return mod.fail(scope, name_src, "redefinition of '{s}'", .{ident_name}); } const init_node = node.getInitNode() orelse @@ -751,14 +763,14 @@ fn boolNot(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerErr .val = Value.initTag(.bool_type), }); const operand = try expr(mod, scope, .{ .ty = bool_type }, node.rhs); - return addZIRUnOp(mod, scope, src, .boolnot, operand); + return addZIRUnOp(mod, scope, src, .bool_not, operand); } fn bitNot(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.op_token].start; const operand = try expr(mod, scope, .none, node.rhs); - return addZIRUnOp(mod, scope, src, .bitnot, operand); + return addZIRUnOp(mod, scope, src, .bit_not, operand); } fn negation(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp, op_inst_tag: zir.Inst.Tag) InnerError!*zir.Inst { @@ -1101,7 +1113,7 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con if (rl == .ref) { return addZIRInst(mod, scope, src, zir.Inst.DeclRef, .{ .decl = decl }, .{}); } else { - return rlWrap(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclVal, .{ + return rvalue(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclVal, .{ .decl = decl, }, .{})); } @@ -1200,6 +1212,9 @@ fn orelseCatchExpr( rhs: *ast.Node, payload_node: ?*ast.Node, ) InnerError!*zir.Inst { + if (true) { + @panic("TODO reimplement this"); + } const tree = scope.tree(); const src = tree.token_locs[op_token].start; @@ -1308,7 +1323,7 @@ pub fn field(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleI .field_name = field_name, }); } - return rlWrap(mod, scope, rl, try addZirInstTag(mod, scope, src, .field_val, .{ + return rvalue(mod, scope, rl, try addZirInstTag(mod, scope, src, .field_val, .{ .object = try expr(mod, scope, .none, node.lhs), .field_name = field_name, })); @@ -1338,7 +1353,7 @@ fn namedField( .field_name = try comptimeExpr(mod, scope, string_rl, params[1]), }); } - return rlWrap(mod, scope, rl, try addZirInstTag(mod, scope, src, .field_val_named, .{ + return rvalue(mod, scope, rl, try addZirInstTag(mod, scope, src, .field_val_named, .{ .object = try expr(mod, scope, .none, params[0]), .field_name = try comptimeExpr(mod, scope, string_rl, params[1]), })); @@ -1359,7 +1374,7 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array .index = try expr(mod, scope, index_rl, node.index_expr), }); } - return rlWrap(mod, scope, rl, try addZirInstTag(mod, scope, src, .elem_val, .{ + return rvalue(mod, scope, rl, try addZirInstTag(mod, scope, src, .elem_val, .{ .array = try expr(mod, scope, .none, node.lhs), .index = try expr(mod, scope, index_rl, node.index_expr), })); @@ -1416,7 +1431,7 @@ fn simpleBinOp( const rhs = try expr(mod, scope, .none, infix_node.rhs); const result = try addZIRBinOp(mod, scope, src, op_inst_tag, lhs, rhs); - return rlWrap(mod, scope, rl, 
result); + return rvalue(mod, scope, rl, result); } fn boolBinOp( @@ -1498,7 +1513,7 @@ fn boolBinOp( condbr.positionals.else_body = .{ .instructions = try rhs_scope.arena.dupe(*zir.Inst, rhs_scope.instructions.items) }; } - return rlWrap(mod, scope, rl, &block.base); + return rvalue(mod, scope, rl, &block.base); } const CondKind = union(enum) { @@ -1578,6 +1593,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn cond_kind = .{ .err_union = null }; } } + const block_branch_count = 2; // then and else var block_scope: Scope.GenZIR = .{ .parent = scope, .decl = scope.ownerDecl().?, @@ -1600,6 +1616,33 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn .instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items), }); + // Depending on whether the result location is a pointer or value, different + // ZIR needs to be generated. In the former case we rely on storing to the + // pointer to communicate the result, and use breakvoid; in the latter case + // the block break instructions will have the result values. + // One more complication: when the result location is a pointer, we detect + // the scenario where the result location is not consumed. In this case + // we emit ZIR for the block break instructions to have the result values, + // and then rvalue() on that to pass the value to the result location. + const branch_rl: ResultLoc = switch (rl) { + .discard, .none, .ty, .ptr, .ref => rl, + + .inferred_ptr => |ptr| blk: { + block_scope.rl_ptr = &ptr.base; + break :blk .{ .block_ptr = &block_scope }; + }, + + .bitcasted_ptr => |ptr| blk: { + block_scope.rl_ptr = &ptr.base; + break :blk .{ .block_ptr = &block_scope }; + }, + + .block_ptr => |parent_block_scope| blk: { + block_scope.rl_ptr = parent_block_scope.rl_ptr.?; + break :blk .{ .block_ptr = &block_scope }; + }, + }; + const then_src = tree.token_locs[if_node.body.lastToken()].start; var then_scope: Scope.GenZIR = .{ .parent = scope, @@ -1612,25 +1655,10 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn // declare payload to the then_scope const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, then_src, if_node.payload); - // Most result location types can be forwarded directly; however - // if we need to write to a pointer which has an inferred type, - // proper type inference requires peer type resolution on the if's - // branches. - const branch_rl: ResultLoc = switch (rl) { - .discard, .none, .ty, .ptr, .ref => rl, - .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block }, - }; - const then_result = try expr(mod, then_sub_scope, branch_rl, if_node.body); - if (!then_result.tag.isNoReturn()) { - _ = try addZIRInst(mod, then_sub_scope, then_src, zir.Inst.Break, .{ - .block = block, - .operand = then_result, - }, .{}); - } - condbr.positionals.then_body = .{ - .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items), - }; + // We hold off on the break instructions as well as copying the then/else + // instructions into place until we know whether to keep store_to_block_ptr + // instructions or not. 
var else_scope: Scope.GenZIR = .{ .parent = scope, @@ -1640,34 +1668,127 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn }; defer else_scope.instructions.deinit(mod.gpa); - if (if_node.@"else") |else_node| { - const else_src = tree.token_locs[else_node.body.lastToken()].start; + var else_src: usize = undefined; + var else_sub_scope: *Module.Scope = undefined; + const else_result: ?*zir.Inst = if (if_node.@"else") |else_node| blk: { + else_src = tree.token_locs[else_node.body.lastToken()].start; // declare payload to the then_scope - const else_sub_scope = try cond_kind.elseSubScope(mod, &else_scope, else_src, else_node.payload); + else_sub_scope = try cond_kind.elseSubScope(mod, &else_scope, else_src, else_node.payload); + + break :blk try expr(mod, else_sub_scope, branch_rl, else_node.body); + } else blk: { + else_src = tree.token_locs[if_node.lastToken()].start; + else_sub_scope = &else_scope.base; + block_scope.rvalue_rl_count += 1; + break :blk null; + }; - const else_result = try expr(mod, else_sub_scope, branch_rl, else_node.body); - if (!else_result.tag.isNoReturn()) { - _ = try addZIRInst(mod, else_sub_scope, else_src, zir.Inst.Break, .{ - .block = block, - .operand = else_result, - }, .{}); - } - } else { - // TODO Optimization opportunity: we can avoid an allocation and a memcpy here - // by directly allocating the body for this one instruction. - const else_src = tree.token_locs[if_node.lastToken()].start; - _ = try addZIRInst(mod, &else_scope.base, else_src, zir.Inst.BreakVoid, .{ - .block = block, - }, .{}); + // We now have enough information to decide whether the result instruction should + // be communicated via result location pointer or break instructions. + const Strategy = enum { + /// Both branches will use break_void; result location is used to communicate the + /// result instruction. + break_void, + /// Use break statements to pass the block result value, and call rvalue() at + /// the end depending on rl. Also elide the store_to_block_ptr instructions + /// depending on rl. + break_operand, + }; + var elide_store_to_block_ptr_instructions = false; + const strategy: Strategy = switch (rl) { + // In this branch there will not be any store_to_block_ptr instructions. + .discard, .none, .ty, .ref => .break_operand, + // The pointer got passed through to the sub-expressions, so we will use + // break_void here. + // In this branch there will not be any store_to_block_ptr instructions. + .ptr => .break_void, + .inferred_ptr, .bitcasted_ptr, .block_ptr => blk: { + if (block_scope.rvalue_rl_count == 2) { + // Neither prong of the if consumed the result location, so we can + // use break instructions to create an rvalue. + elide_store_to_block_ptr_instructions = true; + break :blk Strategy.break_operand; + } else { + // Allow the store_to_block_ptr instructions to remain so that + // semantic analysis can turn them into bitcasts. 
+ break :blk Strategy.break_void; + } + }, + }; + switch (strategy) { + .break_void => { + if (!then_result.tag.isNoReturn()) { + _ = try addZIRNoOp(mod, then_sub_scope, then_src, .break_void); + } + if (else_result) |inst| { + if (!inst.tag.isNoReturn()) { + _ = try addZIRNoOp(mod, else_sub_scope, else_src, .break_void); + } + } else { + _ = try addZIRNoOp(mod, else_sub_scope, else_src, .break_void); + } + assert(!elide_store_to_block_ptr_instructions); + try copyBodyNoEliding(&condbr.positionals.then_body, then_scope); + try copyBodyNoEliding(&condbr.positionals.else_body, else_scope); + return &block.base; + }, + .break_operand => { + if (!then_result.tag.isNoReturn()) { + _ = try addZirInstTag(mod, then_sub_scope, then_src, .@"break", .{ + .block = block, + .operand = then_result, + }); + } + if (else_result) |inst| { + if (!inst.tag.isNoReturn()) { + _ = try addZirInstTag(mod, else_sub_scope, else_src, .@"break", .{ + .block = block, + .operand = inst, + }); + } + } else { + _ = try addZIRNoOp(mod, else_sub_scope, else_src, .break_void); + } + if (elide_store_to_block_ptr_instructions) { + try copyBodyWithElidedStoreBlockPtr(&condbr.positionals.then_body, then_scope); + try copyBodyWithElidedStoreBlockPtr(&condbr.positionals.else_body, else_scope); + } else { + try copyBodyNoEliding(&condbr.positionals.then_body, then_scope); + try copyBodyNoEliding(&condbr.positionals.else_body, else_scope); + } + switch (rl) { + .ref => return &block.base, + else => return rvalue(mod, scope, rl, &block.base), + } + }, } - condbr.positionals.else_body = .{ - .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items), +} + +/// Expects to find exactly 1 .store_to_block_ptr instruction. +fn copyBodyWithElidedStoreBlockPtr(body: *zir.Body, scope: Module.Scope.GenZIR) !void { + body.* = .{ + .instructions = try scope.arena.alloc(*zir.Inst, scope.instructions.items.len - 1), }; + var dst_index: usize = 0; + for (scope.instructions.items) |src_inst| { + if (src_inst.tag != .store_to_block_ptr) { + body.instructions[dst_index] = src_inst; + dst_index += 1; + } + } + assert(dst_index == body.instructions.len); +} - return &block.base; +fn copyBodyNoEliding(body: *zir.Body, scope: Module.Scope.GenZIR) !void { + body.* = .{ + .instructions = try scope.arena.dupe(*zir.Inst, scope.instructions.items), + }; } fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.While) InnerError!*zir.Inst { + if (true) { + @panic("TODO reimplement this"); + } var cond_kind: CondKind = .bool; if (while_node.payload) |_| cond_kind = .{ .optional = null }; if (while_node.@"else") |else_node| { @@ -1821,6 +1942,9 @@ fn forExpr( rl: ResultLoc, for_node: *ast.Node.For, ) InnerError!*zir.Inst { + if (true) { + @panic("TODO reimplement this"); + } if (for_node.label) |label| { try checkLabelRedefinition(mod, scope, label); } @@ -2017,6 +2141,9 @@ fn getRangeNode(node: *ast.Node) ?*ast.Node.SimpleInfixOp { } fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node.Switch) InnerError!*zir.Inst { + if (true) { + @panic("TODO reimplement this"); + } var block_scope: Scope.GenZIR = .{ .parent = scope, .decl = scope.ownerDecl().?, @@ -2186,10 +2313,10 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node // target >= start and target <= end const range_start_ok = try addZIRBinOp(mod, &else_scope.base, range_src, .cmp_gte, target, start); const range_end_ok = try addZIRBinOp(mod, &else_scope.base, range_src, .cmp_lte, target, end); - const 
range_ok = try addZIRBinOp(mod, &else_scope.base, range_src, .booland, range_start_ok, range_end_ok); + const range_ok = try addZIRBinOp(mod, &else_scope.base, range_src, .bool_and, range_start_ok, range_end_ok); if (any_ok) |some| { - any_ok = try addZIRBinOp(mod, &else_scope.base, range_src, .boolor, some, range_ok); + any_ok = try addZIRBinOp(mod, &else_scope.base, range_src, .bool_or, some, range_ok); } else { any_ok = range_ok; } @@ -2201,7 +2328,7 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node const cpm_ok = try addZIRBinOp(mod, &else_scope.base, item_inst.src, .cmp_eq, target, item_inst); if (any_ok) |some| { - any_ok = try addZIRBinOp(mod, &else_scope.base, item_inst.src, .boolor, some, cpm_ok); + any_ok = try addZIRBinOp(mod, &else_scope.base, item_inst.src, .bool_or, some, cpm_ok); } else { any_ok = cpm_ok; } @@ -2238,7 +2365,7 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node try switchCaseExpr(mod, &else_scope.base, case_rl, block, case); } else { // Not handling all possible cases is a compile error. - _ = try addZIRNoOp(mod, &else_scope.base, switch_src, .unreach_nocheck); + _ = try addZIRNoOp(mod, &else_scope.base, switch_src, .unreachable_unsafe); } // All items have been generated, add the instructions to the comptime block. @@ -2288,7 +2415,7 @@ fn ret(mod: *Module, scope: *Scope, cfe: *ast.Node.ControlFlowExpression) InnerE return addZIRUnOp(mod, scope, src, .@"return", operand); } } else { - return addZIRNoOp(mod, scope, src, .returnvoid); + return addZIRNoOp(mod, scope, src, .return_void); } } @@ -2305,7 +2432,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo if (getSimplePrimitiveValue(ident_name)) |typed_value| { const result = try addZIRInstConst(mod, scope, src, typed_value); - return rlWrap(mod, scope, rl, result); + return rvalue(mod, scope, rl, result); } if (ident_name.len >= 2) integer: { @@ -2327,7 +2454,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo 32 => if (is_signed) Value.initTag(.i32_type) else Value.initTag(.u32_type), 64 => if (is_signed) Value.initTag(.i64_type) else Value.initTag(.u64_type), else => { - return rlWrap(mod, scope, rl, try addZIRInstConst(mod, scope, src, .{ + return rvalue(mod, scope, rl, try addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.type), .val = try Value.Tag.int_type.create(scope.arena(), .{ .signed = is_signed, @@ -2340,7 +2467,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo .ty = Type.initTag(.type), .val = val, }); - return rlWrap(mod, scope, rl, result); + return rvalue(mod, scope, rl, result); } } @@ -2351,7 +2478,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo .local_val => { const local_val = s.cast(Scope.LocalVal).?; if (mem.eql(u8, local_val.name, ident_name)) { - return rlWrap(mod, scope, rl, local_val.inst); + return rvalue(mod, scope, rl, local_val.inst); } s = local_val.parent; }, @@ -2360,7 +2487,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo if (mem.eql(u8, local_ptr.name, ident_name)) { if (rl == .ref) return local_ptr.ptr; const loaded = try addZIRUnOp(mod, scope, src, .deref, local_ptr.ptr); - return rlWrap(mod, scope, rl, loaded); + return rvalue(mod, scope, rl, loaded); } s = local_ptr.parent; }, @@ -2373,7 +2500,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo if (rl == .ref) { return addZIRInst(mod, 
scope, src, zir.Inst.DeclRef, .{ .decl = decl }, .{}); } else { - return rlWrap(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclVal, .{ + return rvalue(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.DeclVal, .{ .decl = decl, }, .{})); } @@ -2590,7 +2717,7 @@ fn simpleCast( const dest_type = try typeExpr(mod, scope, params[0]); const rhs = try expr(mod, scope, .none, params[1]); const result = try addZIRBinOp(mod, scope, src, inst_tag, dest_type, rhs); - return rlWrap(mod, scope, rl, result); + return rvalue(mod, scope, rl, result); } fn ptrToInt(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { @@ -2634,11 +2761,11 @@ fn as(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCall) I // TODO here we should be able to resolve the inference; we now have a type for the result. return mod.failTok(scope, call.builtin_token, "TODO implement @as with inferred-type result location pointer", .{}); }, - .block_ptr => |block_ptr| { - const casted_block_ptr = try addZIRInst(mod, scope, src, zir.Inst.CoerceResultBlockPtr, .{ + .block_ptr => |block_scope| { + const casted_block_ptr = try addZirInstTag(mod, scope, src, .coerce_result_block_ptr, .{ .dest_type = dest_type, - .block = block_ptr, - }, .{}); + .block_ptr = block_scope.rl_ptr.?, + }); return expr(mod, scope, .{ .ptr = casted_block_ptr }, params[1]); }, } @@ -2703,7 +2830,7 @@ fn compileError(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerE const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const target = try expr(mod, scope, .none, params[0]); - return addZIRUnOp(mod, scope, src, .compileerror, target); + return addZIRUnOp(mod, scope, src, .compile_error, target); } fn setEvalBranchQuota(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { @@ -2728,12 +2855,12 @@ fn typeOf(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCal return mod.failTok(scope, call.builtin_token, "expected at least 1 argument, found 0", .{}); } if (params.len == 1) { - return rlWrap(mod, scope, rl, try addZIRUnOp(mod, scope, src, .typeof, try expr(mod, scope, .none, params[0]))); + return rvalue(mod, scope, rl, try addZIRUnOp(mod, scope, src, .typeof, try expr(mod, scope, .none, params[0]))); } var items = try arena.alloc(*zir.Inst, params.len); for (params) |param, param_i| items[param_i] = try expr(mod, scope, .none, param); - return rlWrap(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.TypeOfPeer, .{ .items = items }, .{})); + return rvalue(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.TypeOfPeer, .{ .items = items }, .{})); } fn compileLog(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { const tree = scope.tree(); @@ -2756,7 +2883,7 @@ fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.Built // Also, some builtins have a variable number of parameters. 
if (mem.eql(u8, builtin_name, "@ptrToInt")) { - return rlWrap(mod, scope, rl, try ptrToInt(mod, scope, call)); + return rvalue(mod, scope, rl, try ptrToInt(mod, scope, call)); } else if (mem.eql(u8, builtin_name, "@as")) { return as(mod, scope, rl, call); } else if (mem.eql(u8, builtin_name, "@floatCast")) { @@ -2769,9 +2896,9 @@ fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.Built return typeOf(mod, scope, rl, call); } else if (mem.eql(u8, builtin_name, "@breakpoint")) { const src = tree.token_locs[call.builtin_token].start; - return rlWrap(mod, scope, rl, try addZIRNoOp(mod, scope, src, .breakpoint)); + return rvalue(mod, scope, rl, try addZIRNoOp(mod, scope, src, .breakpoint)); } else if (mem.eql(u8, builtin_name, "@import")) { - return rlWrap(mod, scope, rl, try import(mod, scope, call)); + return rvalue(mod, scope, rl, try import(mod, scope, call)); } else if (mem.eql(u8, builtin_name, "@compileError")) { return compileError(mod, scope, call); } else if (mem.eql(u8, builtin_name, "@setEvalBranchQuota")) { @@ -2806,13 +2933,13 @@ fn callExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Call) In .args = args, }, .{}); // TODO function call with result location - return rlWrap(mod, scope, rl, result); + return rvalue(mod, scope, rl, result); } fn unreach(mod: *Module, scope: *Scope, unreach_node: *ast.Node.OneToken) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[unreach_node.token].start; - return addZIRNoOp(mod, scope, src, .@"unreachable"); + return addZIRNoOp(mod, scope, src, .unreachable_safe); } fn getSimplePrimitiveValue(name: []const u8) ?TypedValue { @@ -3099,7 +3226,7 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node, scope: *Scope) bool { /// result locations must call this function on their result. /// As an example, if the `ResultLoc` is `ptr`, it will write the result to the pointer. /// If the `ResultLoc` is `ty`, it will coerce the result to the type. 
-fn rlWrap(mod: *Module, scope: *Scope, rl: ResultLoc, result: *zir.Inst) InnerError!*zir.Inst { +fn rvalue(mod: *Module, scope: *Scope, rl: ResultLoc, result: *zir.Inst) InnerError!*zir.Inst { switch (rl) { .none => return result, .discard => { @@ -3113,42 +3240,31 @@ fn rlWrap(mod: *Module, scope: *Scope, rl: ResultLoc, result: *zir.Inst) InnerEr }, .ty => |ty_inst| return addZIRBinOp(mod, scope, result.src, .as, ty_inst, result), .ptr => |ptr_inst| { - const casted_result = try addZIRInst(mod, scope, result.src, zir.Inst.CoerceToPtrElem, .{ - .ptr = ptr_inst, - .value = result, - }, .{}); - _ = try addZIRBinOp(mod, scope, result.src, .store, ptr_inst, casted_result); - return casted_result; + _ = try addZIRBinOp(mod, scope, result.src, .store, ptr_inst, result); + return result; }, .bitcasted_ptr => |bitcasted_ptr| { - return mod.fail(scope, result.src, "TODO implement rlWrap .bitcasted_ptr", .{}); + return mod.fail(scope, result.src, "TODO implement rvalue .bitcasted_ptr", .{}); }, .inferred_ptr => |alloc| { _ = try addZIRBinOp(mod, scope, result.src, .store_to_inferred_ptr, &alloc.base, result); return result; }, - .block_ptr => |block_ptr| { - return mod.fail(scope, result.src, "TODO implement rlWrap .block_ptr", .{}); + .block_ptr => |block_scope| { + block_scope.rvalue_rl_count += 1; + _ = try addZIRBinOp(mod, scope, result.src, .store_to_block_ptr, block_scope.rl_ptr.?, result); + return result; }, } } -fn rlWrapVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, result: void) InnerError!*zir.Inst { +fn rvalueVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, result: void) InnerError!*zir.Inst { const src = scope.tree().token_locs[node.firstToken()].start; const void_inst = try addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.void), .val = Value.initTag(.void_value), }); - return rlWrap(mod, scope, rl, void_inst); -} - -/// TODO go over all the callsites and see where we can introduce "by-value" ZIR instructions -/// to save ZIR memory. For example, see DeclVal vs DeclRef. -/// Do not add additional callsites to this function. 
-fn rlWrapPtr(mod: *Module, scope: *Scope, rl: ResultLoc, ptr: *zir.Inst) InnerError!*zir.Inst { - if (rl == .ref) return ptr; - - return rlWrap(mod, scope, rl, try addZIRUnOp(mod, scope, ptr.src, .deref, ptr)); + return rvalue(mod, scope, rl, void_inst); } pub fn addZirInstTag( diff --git a/src/codegen.zig b/src/codegen.zig index 1ca2bb2abe..a7b067f7e1 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -840,14 +840,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arg => return self.genArg(inst.castTag(.arg).?), .assembly => return self.genAsm(inst.castTag(.assembly).?), .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .bitand => return self.genBitAnd(inst.castTag(.bitand).?), - .bitor => return self.genBitOr(inst.castTag(.bitor).?), + .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), .block => return self.genBlock(inst.castTag(.block).?), .br => return self.genBr(inst.castTag(.br).?), .breakpoint => return self.genBreakpoint(inst.src), .brvoid => return self.genBrVoid(inst.castTag(.brvoid).?), - .booland => return self.genBoolOp(inst.castTag(.booland).?), - .boolor => return self.genBoolOp(inst.castTag(.boolor).?), + .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), .call => return self.genCall(inst.castTag(.call).?), .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), @@ -1097,7 +1097,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.base.isUnused()) return MCValue.dead; switch (arch) { - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bitand), + .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_and), else => return self.fail(inst.base.src, "TODO implement bitwise and for {}", .{self.target.cpu.arch}), } } @@ -1107,7 +1107,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.base.isUnused()) return MCValue.dead; switch (arch) { - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bitor), + .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_or), else => return self.fail(inst.base.src, "TODO implement bitwise or for {}", .{self.target.cpu.arch}), } } @@ -1371,10 +1371,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, dst_reg, operand).toU32()); } }, - .booland, .bitand => { + .bool_and, .bit_and => { writeInt(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, dst_reg, operand).toU32()); }, - .boolor, .bitor => { + .bool_or, .bit_or => { writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, dst_reg, operand).toU32()); }, .not, .xor => { @@ -2464,14 +2464,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (arch) { .x86_64 => switch (inst.base.tag) { // lhs AND rhs - .booland => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 4, 0x20), + .bool_and => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 4, 0x20), // lhs OR rhs - .boolor => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 1, 0x08), + .bool_or => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 1, 0x08), else => unreachable, // Not a boolean operation }, .arm, .armeb => switch (inst.base.tag) { - .booland => return try 
self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .booland), - .boolor => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .boolor), + .bool_and => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_and), + .bool_or => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_or), else => unreachable, // Not a boolean operation }, else => return self.fail(inst.base.src, "TODO implement boolean operations for {}", .{self.target.cpu.arch}), diff --git a/src/ir.zig b/src/ir.zig index 89698bdd84..b1147871f4 100644 --- a/src/ir.zig +++ b/src/ir.zig @@ -56,9 +56,9 @@ pub const Inst = struct { alloc, arg, assembly, - bitand, + bit_and, bitcast, - bitor, + bit_or, block, br, breakpoint, @@ -85,8 +85,8 @@ pub const Inst = struct { is_err, // *E!T => bool is_err_ptr, - booland, - boolor, + bool_and, + bool_or, /// Read a value from a pointer. load, loop, @@ -147,10 +147,10 @@ pub const Inst = struct { .cmp_gt, .cmp_neq, .store, - .booland, - .boolor, - .bitand, - .bitor, + .bool_and, + .bool_or, + .bit_and, + .bit_or, .xor, => BinOp, diff --git a/src/zir.zig b/src/zir.zig index 9e5830e79a..07fc64b65a 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -59,7 +59,7 @@ pub const Inst = struct { /// Inline assembly. @"asm", /// Bitwise AND. `&` - bitand, + bit_and, /// TODO delete this instruction, it has no purpose. bitcast, /// An arbitrary typed pointer is pointer-casted to a new Pointer. @@ -71,9 +71,9 @@ pub const Inst = struct { /// The new result location pointer has an inferred type. bitcast_result_ptr, /// Bitwise NOT. `~` - bitnot, + bit_not, /// Bitwise OR. `|` - bitor, + bit_or, /// A labeled block of code, which can return a value. block, /// A block of code, which can return a value. There are no instructions that break out of @@ -83,17 +83,17 @@ pub const Inst = struct { block_comptime, /// Same as `block_flat` but additionally makes the inner instructions execute at comptime. block_comptime_flat, - /// Boolean AND. See also `bitand`. - booland, - /// Boolean NOT. See also `bitnot`. - boolnot, - /// Boolean OR. See also `bitor`. - boolor, + /// Boolean AND. See also `bit_and`. + bool_and, + /// Boolean NOT. See also `bit_not`. + bool_not, + /// Boolean OR. See also `bit_or`. + bool_or, /// Return a value from a `Block`. @"break", breakpoint, /// Same as `break` but without an operand; the operand is assumed to be the void value. - breakvoid, + break_void, /// Function call. call, /// `<` @@ -116,12 +116,10 @@ pub const Inst = struct { /// result location pointer, whose type is inferred by peer type resolution on the /// `Block`'s corresponding `break` instructions. coerce_result_block_ptr, - /// Equivalent to `as(ptr_child_type(typeof(ptr)), value)`. - coerce_to_ptr_elem, /// Emit an error message and fail compilation. - compileerror, + compile_error, /// Log compile time variables and emit an error message. - compilelog, + compile_log, /// Conditional branch. Splits control flow based on a boolean condition value. condbr, /// Special case, has no textual representation. @@ -135,11 +133,11 @@ pub const Inst = struct { /// Declares the beginning of a statement. Used for debug info. dbg_stmt, /// Represents a pointer to a global decl. - declref, + decl_ref, /// Represents a pointer to a global decl by string name. - declref_str, - /// Equivalent to a declref followed by deref. - declval, + decl_ref_str, + /// Equivalent to a decl_ref followed by deref. + decl_val, /// Load the value from a pointer. deref, /// Arithmetic division. Asserts no integer overflow. 
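
As a standalone illustration (not part of the patch): the `genArmBinOp` hunk above can map `bool_and`/`bit_and` to AND, `bool_or`/`bit_or` to ORR, and `xor` to EOR (with boolean `not` expressed as EOR against 1) because, once booleans are lowered to 0/1 words in registers, the bitwise ARM data-processing instructions compute exactly the boolean results. A minimal Zig sketch of that equivalence, with `toWord` as an invented helper standing in for how the backend keeps a bool in a register:

    const std = @import("std");

    // Lower a boolean to the 0/1 word the backend would keep in a register.
    fn toWord(b: bool) u32 {
        return if (b) 1 else 0;
    }

    test "bitwise ops on 0/1 words match their boolean counterparts" {
        const cases = [_][2]bool{
            .{ false, false },
            .{ false, true },
            .{ true, false },
            .{ true, true },
        };
        for (cases) |c| {
            const a = toWord(c[0]);
            const b = toWord(c[1]);
            std.debug.assert(a & b == toWord(c[0] and c[1])); // AND serves bool_and and bit_and
            std.debug.assert(a | b == toWord(c[0] or c[1])); // ORR serves bool_or and bit_or
            std.debug.assert(a ^ b == toWord(c[0] != c[1])); // EOR serves xor
        }
    }

This is why the switch arms can be merged per instruction rather than duplicated for the boolean and bitwise tag variants.
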
@@ -185,7 +183,7 @@ pub const Inst = struct { /// can hold the same mathematical value. intcast, /// Make an integer type out of signedness and bit count. - inttype, + int_type, /// Return a boolean false if an optional is null. `x != null` is_non_null, /// Return a boolean true if an optional is null. `x == null` @@ -232,7 +230,7 @@ pub const Inst = struct { /// Sends control flow back to the function's callee. Takes an operand as the return value. @"return", /// Same as `return` but there is no operand; the operand is implicitly the void value. - returnvoid, + return_void, /// Changes the maximum number of backwards branches that compile-time /// code execution can use before giving up and making a compile error. set_eval_branch_quota, @@ -270,6 +268,10 @@ pub const Inst = struct { /// Write a value to a pointer. For loading, see `deref`. store, /// Same as `store` but the type of the value being stored will be used to infer + /// the block type. The LHS is a block instruction, whose result location is + /// being stored to. + store_to_block_ptr, + /// Same as `store` but the type of the value being stored will be used to infer /// the pointer type. store_to_inferred_ptr, /// String Literal. Makes an anonymous Decl and then takes a pointer to it. @@ -286,11 +288,11 @@ pub const Inst = struct { typeof_peer, /// Asserts control-flow will not reach this instruction. Not safety checked - the compiler /// will assume the correctness of this instruction. - unreach_nocheck, + unreachable_unsafe, /// Asserts control-flow will not reach this instruction. In safety-checked modes, /// this will generate a call to the panic function unless it can be proven unreachable /// by the compiler. - @"unreachable", + unreachable_safe, /// Bitwise XOR. `^` xor, /// Create an optional type '?T' @@ -352,17 +354,17 @@ pub const Inst = struct { .alloc_inferred_mut, .breakpoint, .dbg_stmt, - .returnvoid, + .return_void, .ret_ptr, .ret_type, - .unreach_nocheck, - .@"unreachable", + .unreachable_unsafe, + .unreachable_safe, => NoOp, .alloc, .alloc_mut, - .boolnot, - .compileerror, + .bool_not, + .compile_error, .deref, .@"return", .is_null, @@ -400,7 +402,7 @@ pub const Inst = struct { .err_union_code_ptr, .ensure_err_payload_void, .anyframe_type, - .bitnot, + .bit_not, .import, .set_eval_branch_quota, .indexable_ptr_len, @@ -411,10 +413,10 @@ pub const Inst = struct { .array_cat, .array_mul, .array_type, - .bitand, - .bitor, - .booland, - .boolor, + .bit_and, + .bit_or, + .bool_and, + .bool_or, .div, .mod_rem, .mul, @@ -422,6 +424,7 @@ pub const Inst = struct { .shl, .shr, .store, + .store_to_block_ptr, .store_to_inferred_ptr, .sub, .subwrap, @@ -452,19 +455,18 @@ pub const Inst = struct { .arg => Arg, .array_type_sentinel => ArrayTypeSentinel, .@"break" => Break, - .breakvoid => BreakVoid, + .break_void => BreakVoid, .call => Call, - .coerce_to_ptr_elem => CoerceToPtrElem, - .declref => DeclRef, - .declref_str => DeclRefStr, - .declval => DeclVal, + .decl_ref => DeclRef, + .decl_ref_str => DeclRefStr, + .decl_val => DeclVal, .coerce_result_block_ptr => CoerceResultBlockPtr, - .compilelog => CompileLog, + .compile_log => CompileLog, .loop => Loop, .@"const" => Const, .str => Str, .int => Int, - .inttype => IntType, + .int_type => IntType, .field_ptr, .field_val => Field, .field_ptr_named, .field_val_named => FieldNamed, .@"asm" => Asm, @@ -508,18 +510,18 @@ pub const Inst = struct { .arg, .as, .@"asm", - .bitand, + .bit_and, .bitcast, .bitcast_ref, .bitcast_result_ptr, - .bitor, + .bit_or, .block, .block_flat, 
.block_comptime, .block_comptime_flat, - .boolnot, - .booland, - .boolor, + .bool_not, + .bool_and, + .bool_or, .breakpoint, .call, .cmp_lt, @@ -530,12 +532,11 @@ pub const Inst = struct { .cmp_neq, .coerce_result_ptr, .coerce_result_block_ptr, - .coerce_to_ptr_elem, .@"const", .dbg_stmt, - .declref, - .declref_str, - .declval, + .decl_ref, + .decl_ref_str, + .decl_val, .deref, .div, .elem_ptr, @@ -552,7 +553,7 @@ pub const Inst = struct { .fntype, .int, .intcast, - .inttype, + .int_type, .is_non_null, .is_null, .is_non_null_ptr, @@ -579,6 +580,7 @@ pub const Inst = struct { .mut_slice_type, .const_slice_type, .store, + .store_to_block_ptr, .store_to_inferred_ptr, .str, .sub, @@ -602,7 +604,7 @@ pub const Inst = struct { .merge_error_sets, .anyframe_type, .error_union_type, - .bitnot, + .bit_not, .error_set, .slice, .slice_start, @@ -611,20 +613,20 @@ pub const Inst = struct { .typeof_peer, .resolve_inferred_alloc, .set_eval_branch_quota, - .compilelog, + .compile_log, .enum_type, .union_type, .struct_type, => false, .@"break", - .breakvoid, + .break_void, .condbr, - .compileerror, + .compile_error, .@"return", - .returnvoid, - .unreach_nocheck, - .@"unreachable", + .return_void, + .unreachable_unsafe, + .unreachable_safe, .loop, .switchbr, .container_field_named, @@ -717,7 +719,7 @@ pub const Inst = struct { }; pub const BreakVoid = struct { - pub const base_tag = Tag.breakvoid; + pub const base_tag = Tag.break_void; base: Inst, positionals: struct { @@ -739,19 +741,8 @@ pub const Inst = struct { }, }; - pub const CoerceToPtrElem = struct { - pub const base_tag = Tag.coerce_to_ptr_elem; - base: Inst, - - positionals: struct { - ptr: *Inst, - value: *Inst, - }, - kw_args: struct {}, - }; - pub const DeclRef = struct { - pub const base_tag = Tag.declref; + pub const base_tag = Tag.decl_ref; base: Inst, positionals: struct { @@ -761,7 +752,7 @@ pub const Inst = struct { }; pub const DeclRefStr = struct { - pub const base_tag = Tag.declref_str; + pub const base_tag = Tag.decl_ref_str; base: Inst, positionals: struct { @@ -771,7 +762,7 @@ pub const Inst = struct { }; pub const DeclVal = struct { - pub const base_tag = Tag.declval; + pub const base_tag = Tag.decl_val; base: Inst, positionals: struct { @@ -786,13 +777,13 @@ pub const Inst = struct { positionals: struct { dest_type: *Inst, - block: *Block, + block_ptr: *Inst, }, kw_args: struct {}, }; pub const CompileLog = struct { - pub const base_tag = Tag.compilelog; + pub const base_tag = Tag.compile_log; base: Inst, positionals: struct { @@ -905,7 +896,7 @@ pub const Inst = struct { }; pub const IntType = struct { - pub const base_tag = Tag.inttype; + pub const base_tag = Tag.int_type; base: Inst, positionals: struct { @@ -1641,10 +1632,10 @@ const DumpTzir = struct { .cmp_gt, .cmp_neq, .store, - .booland, - .boolor, - .bitand, - .bitor, + .bool_and, + .bool_or, + .bit_and, + .bit_or, .xor, => { const bin_op = inst.cast(ir.Inst.BinOp).?; @@ -1753,10 +1744,10 @@ const DumpTzir = struct { .cmp_gt, .cmp_neq, .store, - .booland, - .boolor, - .bitand, - .bitor, + .bool_and, + .bool_or, + .bit_and, + .bit_or, .xor, => { const bin_op = inst.cast(ir.Inst.BinOp).?; diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 0caaa2a03f..ca8255df94 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -28,144 +28,134 @@ const Decl = Module.Decl; pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst { switch (old_inst.tag) { - .alloc => return analyzeInstAlloc(mod, scope, old_inst.castTag(.alloc).?), - .alloc_mut => return 
analyzeInstAllocMut(mod, scope, old_inst.castTag(.alloc_mut).?), - .alloc_inferred => return analyzeInstAllocInferred( - mod, - scope, - old_inst.castTag(.alloc_inferred).?, - .inferred_alloc_const, - ), - .alloc_inferred_mut => return analyzeInstAllocInferred( - mod, - scope, - old_inst.castTag(.alloc_inferred_mut).?, - .inferred_alloc_mut, - ), - .arg => return analyzeInstArg(mod, scope, old_inst.castTag(.arg).?), - .bitcast_ref => return bitCastRef(mod, scope, old_inst.castTag(.bitcast_ref).?), - .bitcast_result_ptr => return bitCastResultPtr(mod, scope, old_inst.castTag(.bitcast_result_ptr).?), - .block => return analyzeInstBlock(mod, scope, old_inst.castTag(.block).?, false), - .block_comptime => return analyzeInstBlock(mod, scope, old_inst.castTag(.block_comptime).?, true), - .block_flat => return analyzeInstBlockFlat(mod, scope, old_inst.castTag(.block_flat).?, false), - .block_comptime_flat => return analyzeInstBlockFlat(mod, scope, old_inst.castTag(.block_comptime_flat).?, true), - .@"break" => return analyzeInstBreak(mod, scope, old_inst.castTag(.@"break").?), - .breakpoint => return analyzeInstBreakpoint(mod, scope, old_inst.castTag(.breakpoint).?), - .breakvoid => return analyzeInstBreakVoid(mod, scope, old_inst.castTag(.breakvoid).?), - .call => return call(mod, scope, old_inst.castTag(.call).?), - .coerce_result_block_ptr => return analyzeInstCoerceResultBlockPtr(mod, scope, old_inst.castTag(.coerce_result_block_ptr).?), - .coerce_result_ptr => return analyzeInstCoerceResultPtr(mod, scope, old_inst.castTag(.coerce_result_ptr).?), - .coerce_to_ptr_elem => return analyzeInstCoerceToPtrElem(mod, scope, old_inst.castTag(.coerce_to_ptr_elem).?), - .compileerror => return analyzeInstCompileError(mod, scope, old_inst.castTag(.compileerror).?), - .compilelog => return analyzeInstCompileLog(mod, scope, old_inst.castTag(.compilelog).?), - .@"const" => return analyzeInstConst(mod, scope, old_inst.castTag(.@"const").?), - .dbg_stmt => return analyzeInstDbgStmt(mod, scope, old_inst.castTag(.dbg_stmt).?), - .declref => return declRef(mod, scope, old_inst.castTag(.declref).?), - .declref_str => return analyzeInstDeclRefStr(mod, scope, old_inst.castTag(.declref_str).?), - .declval => return declVal(mod, scope, old_inst.castTag(.declval).?), - .ensure_result_used => return analyzeInstEnsureResultUsed(mod, scope, old_inst.castTag(.ensure_result_used).?), - .ensure_result_non_error => return analyzeInstEnsureResultNonError(mod, scope, old_inst.castTag(.ensure_result_non_error).?), - .indexable_ptr_len => return indexablePtrLen(mod, scope, old_inst.castTag(.indexable_ptr_len).?), - .ref => return ref(mod, scope, old_inst.castTag(.ref).?), - .resolve_inferred_alloc => return analyzeInstResolveInferredAlloc(mod, scope, old_inst.castTag(.resolve_inferred_alloc).?), - .ret_ptr => return analyzeInstRetPtr(mod, scope, old_inst.castTag(.ret_ptr).?), - .ret_type => return analyzeInstRetType(mod, scope, old_inst.castTag(.ret_type).?), - .store_to_inferred_ptr => return analyzeInstStoreToInferredPtr(mod, scope, old_inst.castTag(.store_to_inferred_ptr).?), - .single_const_ptr_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.single_const_ptr_type).?, false, .One), - .single_mut_ptr_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.single_mut_ptr_type).?, true, .One), - .many_const_ptr_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.many_const_ptr_type).?, false, .Many), - .many_mut_ptr_type => return analyzeInstSimplePtrType(mod, scope, 
old_inst.castTag(.many_mut_ptr_type).?, true, .Many), - .c_const_ptr_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.c_const_ptr_type).?, false, .C), - .c_mut_ptr_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.c_mut_ptr_type).?, true, .C), - .const_slice_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.const_slice_type).?, false, .Slice), - .mut_slice_type => return analyzeInstSimplePtrType(mod, scope, old_inst.castTag(.mut_slice_type).?, true, .Slice), - .ptr_type => return analyzeInstPtrType(mod, scope, old_inst.castTag(.ptr_type).?), - .store => return analyzeInstStore(mod, scope, old_inst.castTag(.store).?), - .set_eval_branch_quota => return analyzeInstSetEvalBranchQuota(mod, scope, old_inst.castTag(.set_eval_branch_quota).?), - .str => return analyzeInstStr(mod, scope, old_inst.castTag(.str).?), - .int => return analyzeInstInt(mod, scope, old_inst.castTag(.int).?), - .inttype => return analyzeInstIntType(mod, scope, old_inst.castTag(.inttype).?), - .loop => return analyzeInstLoop(mod, scope, old_inst.castTag(.loop).?), - .param_type => return analyzeInstParamType(mod, scope, old_inst.castTag(.param_type).?), - .ptrtoint => return analyzeInstPtrToInt(mod, scope, old_inst.castTag(.ptrtoint).?), - .field_ptr => return fieldPtr(mod, scope, old_inst.castTag(.field_ptr).?), - .field_val => return fieldVal(mod, scope, old_inst.castTag(.field_val).?), - .field_ptr_named => return fieldPtrNamed(mod, scope, old_inst.castTag(.field_ptr_named).?), - .field_val_named => return fieldValNamed(mod, scope, old_inst.castTag(.field_val_named).?), - .deref => return analyzeInstDeref(mod, scope, old_inst.castTag(.deref).?), - .as => return analyzeInstAs(mod, scope, old_inst.castTag(.as).?), - .@"asm" => return analyzeInstAsm(mod, scope, old_inst.castTag(.@"asm").?), - .@"unreachable" => return analyzeInstUnreachable(mod, scope, old_inst.castTag(.@"unreachable").?, true), - .unreach_nocheck => return analyzeInstUnreachable(mod, scope, old_inst.castTag(.unreach_nocheck).?, false), - .@"return" => return analyzeInstRet(mod, scope, old_inst.castTag(.@"return").?), - .returnvoid => return analyzeInstRetVoid(mod, scope, old_inst.castTag(.returnvoid).?), - .@"fn" => return analyzeInstFn(mod, scope, old_inst.castTag(.@"fn").?), - .@"export" => return analyzeInstExport(mod, scope, old_inst.castTag(.@"export").?), - .primitive => return analyzeInstPrimitive(mod, scope, old_inst.castTag(.primitive).?), - .fntype => return analyzeInstFnType(mod, scope, old_inst.castTag(.fntype).?), - .intcast => return analyzeInstIntCast(mod, scope, old_inst.castTag(.intcast).?), - .bitcast => return analyzeInstBitCast(mod, scope, old_inst.castTag(.bitcast).?), - .floatcast => return analyzeInstFloatCast(mod, scope, old_inst.castTag(.floatcast).?), - .elem_ptr => return elemPtr(mod, scope, old_inst.castTag(.elem_ptr).?), - .elem_val => return elemVal(mod, scope, old_inst.castTag(.elem_val).?), - .add => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.add).?), - .addwrap => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.addwrap).?), - .sub => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.sub).?), - .subwrap => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.subwrap).?), - .mul => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.mul).?), - .mulwrap => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.mulwrap).?), - .div => return analyzeInstArithmetic(mod, scope, old_inst.castTag(.div).?), - .mod_rem => 
return analyzeInstArithmetic(mod, scope, old_inst.castTag(.mod_rem).?), - .array_cat => return analyzeInstArrayCat(mod, scope, old_inst.castTag(.array_cat).?), - .array_mul => return analyzeInstArrayMul(mod, scope, old_inst.castTag(.array_mul).?), - .bitand => return analyzeInstBitwise(mod, scope, old_inst.castTag(.bitand).?), - .bitnot => return analyzeInstBitNot(mod, scope, old_inst.castTag(.bitnot).?), - .bitor => return analyzeInstBitwise(mod, scope, old_inst.castTag(.bitor).?), - .xor => return analyzeInstBitwise(mod, scope, old_inst.castTag(.xor).?), - .shl => return analyzeInstShl(mod, scope, old_inst.castTag(.shl).?), - .shr => return analyzeInstShr(mod, scope, old_inst.castTag(.shr).?), - .cmp_lt => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return analyzeInstCmp(mod, scope, old_inst.castTag(.cmp_neq).?, .neq), - .condbr => return analyzeInstCondBr(mod, scope, old_inst.castTag(.condbr).?), - .is_null => return isNull(mod, scope, old_inst.castTag(.is_null).?, false), - .is_non_null => return isNull(mod, scope, old_inst.castTag(.is_non_null).?, true), - .is_null_ptr => return isNullPtr(mod, scope, old_inst.castTag(.is_null_ptr).?, false), - .is_non_null_ptr => return isNullPtr(mod, scope, old_inst.castTag(.is_non_null_ptr).?, true), - .is_err => return isErr(mod, scope, old_inst.castTag(.is_err).?), - .is_err_ptr => return isErrPtr(mod, scope, old_inst.castTag(.is_err_ptr).?), - .boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?), - .typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?), - .typeof_peer => return analyzeInstTypeOfPeer(mod, scope, old_inst.castTag(.typeof_peer).?), - .optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?), - .optional_payload_safe => return optionalPayload(mod, scope, old_inst.castTag(.optional_payload_safe).?, true), - .optional_payload_unsafe => return optionalPayload(mod, scope, old_inst.castTag(.optional_payload_unsafe).?, false), - .optional_payload_safe_ptr => return optionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_safe_ptr).?, true), - .optional_payload_unsafe_ptr => return optionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_unsafe_ptr).?, false), - .err_union_payload_safe => return errorUnionPayload(mod, scope, old_inst.castTag(.err_union_payload_safe).?, true), - .err_union_payload_unsafe => return errorUnionPayload(mod, scope, old_inst.castTag(.err_union_payload_unsafe).?, false), - .err_union_payload_safe_ptr => return errorUnionPayloadPtr(mod, scope, old_inst.castTag(.err_union_payload_safe_ptr).?, true), - .err_union_payload_unsafe_ptr => return errorUnionPayloadPtr(mod, scope, old_inst.castTag(.err_union_payload_unsafe_ptr).?, false), - .err_union_code => return errorUnionCode(mod, scope, old_inst.castTag(.err_union_code).?), - .err_union_code_ptr => return errorUnionCodePtr(mod, scope, old_inst.castTag(.err_union_code_ptr).?), - .ensure_err_payload_void => return analyzeInstEnsureErrPayloadVoid(mod, scope, old_inst.castTag(.ensure_err_payload_void).?), - .array_type => return analyzeInstArrayType(mod, scope, old_inst.castTag(.array_type).?), - 
.array_type_sentinel => return analyzeInstArrayTypeSentinel(mod, scope, old_inst.castTag(.array_type_sentinel).?), - .enum_literal => return analyzeInstEnumLiteral(mod, scope, old_inst.castTag(.enum_literal).?), - .merge_error_sets => return analyzeInstMergeErrorSets(mod, scope, old_inst.castTag(.merge_error_sets).?), - .error_union_type => return analyzeInstErrorUnionType(mod, scope, old_inst.castTag(.error_union_type).?), - .anyframe_type => return analyzeInstAnyframeType(mod, scope, old_inst.castTag(.anyframe_type).?), - .error_set => return analyzeInstErrorSet(mod, scope, old_inst.castTag(.error_set).?), - .slice => return analyzeInstSlice(mod, scope, old_inst.castTag(.slice).?), - .slice_start => return analyzeInstSliceStart(mod, scope, old_inst.castTag(.slice_start).?), - .import => return analyzeInstImport(mod, scope, old_inst.castTag(.import).?), - .switchbr => return analyzeInstSwitchBr(mod, scope, old_inst.castTag(.switchbr).?), - .switch_range => return analyzeInstSwitchRange(mod, scope, old_inst.castTag(.switch_range).?), - .booland => return analyzeInstBoolOp(mod, scope, old_inst.castTag(.booland).?), - .boolor => return analyzeInstBoolOp(mod, scope, old_inst.castTag(.boolor).?), + .alloc => return zirAlloc(mod, scope, old_inst.castTag(.alloc).?), + .alloc_mut => return zirAllocMut(mod, scope, old_inst.castTag(.alloc_mut).?), + .alloc_inferred => return zirAllocInferred(mod, scope, old_inst.castTag(.alloc_inferred).?, .inferred_alloc_const), + .alloc_inferred_mut => return zirAllocInferred(mod, scope, old_inst.castTag(.alloc_inferred_mut).?, .inferred_alloc_mut), + .arg => return zirArg(mod, scope, old_inst.castTag(.arg).?), + .bitcast_ref => return zirBitcastRef(mod, scope, old_inst.castTag(.bitcast_ref).?), + .bitcast_result_ptr => return zirBitcastResultPtr(mod, scope, old_inst.castTag(.bitcast_result_ptr).?), + .block => return zirBlock(mod, scope, old_inst.castTag(.block).?, false), + .block_comptime => return zirBlock(mod, scope, old_inst.castTag(.block_comptime).?, true), + .block_flat => return zirBlockFlat(mod, scope, old_inst.castTag(.block_flat).?, false), + .block_comptime_flat => return zirBlockFlat(mod, scope, old_inst.castTag(.block_comptime_flat).?, true), + .@"break" => return zirBreak(mod, scope, old_inst.castTag(.@"break").?), + .breakpoint => return zirBreakpoint(mod, scope, old_inst.castTag(.breakpoint).?), + .break_void => return zirBreakVoid(mod, scope, old_inst.castTag(.break_void).?), + .call => return zirCall(mod, scope, old_inst.castTag(.call).?), + .coerce_result_block_ptr => return zirCoerceResultBlockPtr(mod, scope, old_inst.castTag(.coerce_result_block_ptr).?), + .coerce_result_ptr => return zirCoerceResultPtr(mod, scope, old_inst.castTag(.coerce_result_ptr).?), + .compile_error => return zirCompileError(mod, scope, old_inst.castTag(.compile_error).?), + .compile_log => return zirCompileLog(mod, scope, old_inst.castTag(.compile_log).?), + .@"const" => return zirConst(mod, scope, old_inst.castTag(.@"const").?), + .dbg_stmt => return zirDbgStmt(mod, scope, old_inst.castTag(.dbg_stmt).?), + .decl_ref => return zirDeclRef(mod, scope, old_inst.castTag(.decl_ref).?), + .decl_ref_str => return zirDeclRefStr(mod, scope, old_inst.castTag(.decl_ref_str).?), + .decl_val => return zirDeclVal(mod, scope, old_inst.castTag(.decl_val).?), + .ensure_result_used => return zirEnsureResultUsed(mod, scope, old_inst.castTag(.ensure_result_used).?), + .ensure_result_non_error => return zirEnsureResultNonError(mod, scope, old_inst.castTag(.ensure_result_non_error).?), + 
.indexable_ptr_len => return zirIndexablePtrLen(mod, scope, old_inst.castTag(.indexable_ptr_len).?), + .ref => return zirRef(mod, scope, old_inst.castTag(.ref).?), + .resolve_inferred_alloc => return zirResolveInferredAlloc(mod, scope, old_inst.castTag(.resolve_inferred_alloc).?), + .ret_ptr => return zirRetPtr(mod, scope, old_inst.castTag(.ret_ptr).?), + .ret_type => return zirRetType(mod, scope, old_inst.castTag(.ret_type).?), + .store_to_block_ptr => return zirStoreToBlockPtr(mod, scope, old_inst.castTag(.store_to_block_ptr).?), + .store_to_inferred_ptr => return zirStoreToInferredPtr(mod, scope, old_inst.castTag(.store_to_inferred_ptr).?), + .single_const_ptr_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.single_const_ptr_type).?, false, .One), + .single_mut_ptr_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.single_mut_ptr_type).?, true, .One), + .many_const_ptr_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.many_const_ptr_type).?, false, .Many), + .many_mut_ptr_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.many_mut_ptr_type).?, true, .Many), + .c_const_ptr_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.c_const_ptr_type).?, false, .C), + .c_mut_ptr_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.c_mut_ptr_type).?, true, .C), + .const_slice_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.const_slice_type).?, false, .Slice), + .mut_slice_type => return zirSimplePtrType(mod, scope, old_inst.castTag(.mut_slice_type).?, true, .Slice), + .ptr_type => return zirPtrType(mod, scope, old_inst.castTag(.ptr_type).?), + .store => return zirStore(mod, scope, old_inst.castTag(.store).?), + .set_eval_branch_quota => return zirSetEvalBranchQuota(mod, scope, old_inst.castTag(.set_eval_branch_quota).?), + .str => return zirStr(mod, scope, old_inst.castTag(.str).?), + .int => return zirInt(mod, scope, old_inst.castTag(.int).?), + .int_type => return zirIntType(mod, scope, old_inst.castTag(.int_type).?), + .loop => return zirLoop(mod, scope, old_inst.castTag(.loop).?), + .param_type => return zirParamType(mod, scope, old_inst.castTag(.param_type).?), + .ptrtoint => return zirPtrtoint(mod, scope, old_inst.castTag(.ptrtoint).?), + .field_ptr => return zirFieldPtr(mod, scope, old_inst.castTag(.field_ptr).?), + .field_val => return zirFieldVal(mod, scope, old_inst.castTag(.field_val).?), + .field_ptr_named => return zirFieldPtrNamed(mod, scope, old_inst.castTag(.field_ptr_named).?), + .field_val_named => return zirFieldValNamed(mod, scope, old_inst.castTag(.field_val_named).?), + .deref => return zirDeref(mod, scope, old_inst.castTag(.deref).?), + .as => return zirAs(mod, scope, old_inst.castTag(.as).?), + .@"asm" => return zirAsm(mod, scope, old_inst.castTag(.@"asm").?), + .unreachable_safe => return zirUnreachable(mod, scope, old_inst.castTag(.unreachable_safe).?, true), + .unreachable_unsafe => return zirUnreachable(mod, scope, old_inst.castTag(.unreachable_unsafe).?, false), + .@"return" => return zirReturn(mod, scope, old_inst.castTag(.@"return").?), + .return_void => return zirReturnVoid(mod, scope, old_inst.castTag(.return_void).?), + .@"fn" => return zirFn(mod, scope, old_inst.castTag(.@"fn").?), + .@"export" => return zirExport(mod, scope, old_inst.castTag(.@"export").?), + .primitive => return zirPrimitive(mod, scope, old_inst.castTag(.primitive).?), + .fntype => return zirFnType(mod, scope, old_inst.castTag(.fntype).?), + .intcast => return zirIntcast(mod, scope, old_inst.castTag(.intcast).?), + 
.bitcast => return zirBitcast(mod, scope, old_inst.castTag(.bitcast).?), + .floatcast => return zirFloatcast(mod, scope, old_inst.castTag(.floatcast).?), + .elem_ptr => return zirElemPtr(mod, scope, old_inst.castTag(.elem_ptr).?), + .elem_val => return zirElemVal(mod, scope, old_inst.castTag(.elem_val).?), + .add => return zirArithmetic(mod, scope, old_inst.castTag(.add).?), + .addwrap => return zirArithmetic(mod, scope, old_inst.castTag(.addwrap).?), + .sub => return zirArithmetic(mod, scope, old_inst.castTag(.sub).?), + .subwrap => return zirArithmetic(mod, scope, old_inst.castTag(.subwrap).?), + .mul => return zirArithmetic(mod, scope, old_inst.castTag(.mul).?), + .mulwrap => return zirArithmetic(mod, scope, old_inst.castTag(.mulwrap).?), + .div => return zirArithmetic(mod, scope, old_inst.castTag(.div).?), + .mod_rem => return zirArithmetic(mod, scope, old_inst.castTag(.mod_rem).?), + .array_cat => return zirArrayCat(mod, scope, old_inst.castTag(.array_cat).?), + .array_mul => return zirArrayMul(mod, scope, old_inst.castTag(.array_mul).?), + .bit_and => return zirBitwise(mod, scope, old_inst.castTag(.bit_and).?), + .bit_not => return zirBitNot(mod, scope, old_inst.castTag(.bit_not).?), + .bit_or => return zirBitwise(mod, scope, old_inst.castTag(.bit_or).?), + .xor => return zirBitwise(mod, scope, old_inst.castTag(.xor).?), + .shl => return zirShl(mod, scope, old_inst.castTag(.shl).?), + .shr => return zirShr(mod, scope, old_inst.castTag(.shr).?), + .cmp_lt => return zirCmp(mod, scope, old_inst.castTag(.cmp_lt).?, .lt), + .cmp_lte => return zirCmp(mod, scope, old_inst.castTag(.cmp_lte).?, .lte), + .cmp_eq => return zirCmp(mod, scope, old_inst.castTag(.cmp_eq).?, .eq), + .cmp_gte => return zirCmp(mod, scope, old_inst.castTag(.cmp_gte).?, .gte), + .cmp_gt => return zirCmp(mod, scope, old_inst.castTag(.cmp_gt).?, .gt), + .cmp_neq => return zirCmp(mod, scope, old_inst.castTag(.cmp_neq).?, .neq), + .condbr => return zirCondbr(mod, scope, old_inst.castTag(.condbr).?), + .is_null => return zirIsNull(mod, scope, old_inst.castTag(.is_null).?, false), + .is_non_null => return zirIsNull(mod, scope, old_inst.castTag(.is_non_null).?, true), + .is_null_ptr => return zirIsNullPtr(mod, scope, old_inst.castTag(.is_null_ptr).?, false), + .is_non_null_ptr => return zirIsNullPtr(mod, scope, old_inst.castTag(.is_non_null_ptr).?, true), + .is_err => return zirIsErr(mod, scope, old_inst.castTag(.is_err).?), + .is_err_ptr => return zirIsErrPtr(mod, scope, old_inst.castTag(.is_err_ptr).?), + .bool_not => return zirBoolNot(mod, scope, old_inst.castTag(.bool_not).?), + .typeof => return zirTypeof(mod, scope, old_inst.castTag(.typeof).?), + .typeof_peer => return zirTypeofPeer(mod, scope, old_inst.castTag(.typeof_peer).?), + .optional_type => return zirOptionalType(mod, scope, old_inst.castTag(.optional_type).?), + .optional_payload_safe => return zirOptionalPayload(mod, scope, old_inst.castTag(.optional_payload_safe).?, true), + .optional_payload_unsafe => return zirOptionalPayload(mod, scope, old_inst.castTag(.optional_payload_unsafe).?, false), + .optional_payload_safe_ptr => return zirOptionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_safe_ptr).?, true), + .optional_payload_unsafe_ptr => return zirOptionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_unsafe_ptr).?, false), + .err_union_payload_safe => return zirErrUnionPayload(mod, scope, old_inst.castTag(.err_union_payload_safe).?, true), + .err_union_payload_unsafe => return zirErrUnionPayload(mod, scope, 
old_inst.castTag(.err_union_payload_unsafe).?, false), + .err_union_payload_safe_ptr => return zirErrUnionPayloadPtr(mod, scope, old_inst.castTag(.err_union_payload_safe_ptr).?, true), + .err_union_payload_unsafe_ptr => return zirErrUnionPayloadPtr(mod, scope, old_inst.castTag(.err_union_payload_unsafe_ptr).?, false), + .err_union_code => return zirErrUnionCode(mod, scope, old_inst.castTag(.err_union_code).?), + .err_union_code_ptr => return zirErrUnionCodePtr(mod, scope, old_inst.castTag(.err_union_code_ptr).?), + .ensure_err_payload_void => return zirEnsureErrPayloadVoid(mod, scope, old_inst.castTag(.ensure_err_payload_void).?), + .array_type => return zirArrayType(mod, scope, old_inst.castTag(.array_type).?), + .array_type_sentinel => return zirArrayTypeSentinel(mod, scope, old_inst.castTag(.array_type_sentinel).?), + .enum_literal => return zirEnumLiteral(mod, scope, old_inst.castTag(.enum_literal).?), + .merge_error_sets => return zirMergeErrorSets(mod, scope, old_inst.castTag(.merge_error_sets).?), + .error_union_type => return zirErrorUnionType(mod, scope, old_inst.castTag(.error_union_type).?), + .anyframe_type => return zirAnyframeType(mod, scope, old_inst.castTag(.anyframe_type).?), + .error_set => return zirErrorSet(mod, scope, old_inst.castTag(.error_set).?), + .slice => return zirSlice(mod, scope, old_inst.castTag(.slice).?), + .slice_start => return zirSliceStart(mod, scope, old_inst.castTag(.slice_start).?), + .import => return zirImport(mod, scope, old_inst.castTag(.import).?), + .switchbr => return zirSwitchbr(mod, scope, old_inst.castTag(.switchbr).?), + .switch_range => return zirSwitchRange(mod, scope, old_inst.castTag(.switch_range).?), + .bool_and => return zirBoolOp(mod, scope, old_inst.castTag(.bool_and).?), + .bool_or => return zirBoolOp(mod, scope, old_inst.castTag(.bool_or).?), .container_field_named, .container_field_typed, @@ -258,7 +248,7 @@ pub fn resolveInstConst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerE }; } -fn analyzeInstConst(mod: *Module, scope: *Scope, const_inst: *zir.Inst.Const) InnerError!*Inst { +fn zirConst(mod: *Module, scope: *Scope, const_inst: *zir.Inst.Const) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); // Move the TypedValue from old memory to new memory. 
This allows freeing the ZIR instructions @@ -275,44 +265,35 @@ fn analyzeConstInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError }; } -fn analyzeInstCoerceResultBlockPtr( +fn zirCoerceResultBlockPtr( mod: *Module, scope: *Scope, inst: *zir.Inst.CoerceResultBlockPtr, ) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstCoerceResultBlockPtr", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirCoerceResultBlockPtr", .{}); } -fn bitCastRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirBitcastRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement zir_sema.bitCastRef", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zir_sema.zirBitcastRef", .{}); } -fn bitCastResultPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirBitcastResultPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement zir_sema.bitCastResultPtr", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn analyzeInstCoerceResultPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirCoerceResultPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstCoerceResultPtr", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirCoerceResultPtr", .{}); } -/// Equivalent to `as(ptr_child_type(typeof(ptr)), value)`. 
-fn analyzeInstCoerceToPtrElem(mod: *Module, scope: *Scope, inst: *zir.Inst.CoerceToPtrElem) InnerError!*Inst { - const tracy = trace(@src()); - defer tracy.end(); - const ptr = try resolveInst(mod, scope, inst.positionals.ptr); - const operand = try resolveInst(mod, scope, inst.positionals.value); - return mod.coerce(scope, ptr.ty.elemType(), operand); -} - -fn analyzeInstRetPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { +fn zirRetPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const b = try mod.requireFunctionBlock(scope, inst.base.src); @@ -322,7 +303,7 @@ fn analyzeInstRetPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerErr return mod.addNoOp(b, inst.base.src, ptr_type, .alloc); } -fn ref(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -330,7 +311,7 @@ fn ref(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { return mod.analyzeRef(scope, inst.base.src, operand); } -fn analyzeInstRetType(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { +fn zirRetType(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const b = try mod.requireFunctionBlock(scope, inst.base.src); @@ -339,7 +320,7 @@ fn analyzeInstRetType(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerEr return mod.constType(scope, inst.base.src, ret_type); } -fn analyzeInstEnsureResultUsed(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirEnsureResultUsed(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); @@ -349,7 +330,7 @@ fn analyzeInstEnsureResultUsed(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp } } -fn analyzeInstEnsureResultNonError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirEnsureResultNonError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); @@ -359,7 +340,7 @@ fn analyzeInstEnsureResultNonError(mod: *Module, scope: *Scope, inst: *zir.Inst. 
} } -fn indexablePtrLen(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirIndexablePtrLen(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -389,7 +370,7 @@ fn indexablePtrLen(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError return mod.analyzeDeref(scope, inst.base.src, result_ptr, result_ptr.src); } -fn analyzeInstAlloc(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirAlloc(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const var_type = try resolveType(mod, scope, inst.positionals.operand); @@ -398,7 +379,7 @@ fn analyzeInstAlloc(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerErro return mod.addNoOp(b, inst.base.src, ptr_type, .alloc); } -fn analyzeInstAllocMut(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirAllocMut(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const var_type = try resolveType(mod, scope, inst.positionals.operand); @@ -408,7 +389,7 @@ fn analyzeInstAllocMut(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerE return mod.addNoOp(b, inst.base.src, ptr_type, .alloc); } -fn analyzeInstAllocInferred( +fn zirAllocInferred( mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp, @@ -437,7 +418,7 @@ fn analyzeInstAllocInferred( return result; } -fn analyzeInstResolveInferredAlloc( +fn zirResolveInferredAlloc( mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, @@ -466,28 +447,44 @@ fn analyzeInstResolveInferredAlloc( return mod.constVoid(scope, inst.base.src); } -fn analyzeInstStoreToInferredPtr( +fn zirStoreToBlockPtr( + mod: *Module, + scope: *Scope, + inst: *zir.Inst.BinOp, +) InnerError!*Inst { + const tracy = trace(@src()); + defer tracy.end(); + + const ptr = try resolveInst(mod, scope, inst.positionals.lhs); + const value = try resolveInst(mod, scope, inst.positionals.rhs); + const ptr_ty = try mod.simplePtrType(scope, inst.base.src, value.ty, true, .One); + const b = try mod.requireRuntimeBlock(scope, inst.base.src); + const bitcasted_ptr = try mod.addUnOp(b, inst.base.src, ptr_ty, .bitcast, ptr); + return mod.storePtr(scope, inst.base.src, bitcasted_ptr, value); +} + +fn zirStoreToInferredPtr( mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp, ) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); + const ptr = try resolveInst(mod, scope, inst.positionals.lhs); const value = try resolveInst(mod, scope, inst.positionals.rhs); const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(scope.arena(), value); - // Create a new alloc with exactly the type the pointer wants. - // Later it gets cleaned up by aliasing the alloc we are supposed to be storing to. + // Create a runtime bitcast instruction with exactly the type the pointer wants. 
const ptr_ty = try mod.simplePtrType(scope, inst.base.src, value.ty, true, .One); const b = try mod.requireRuntimeBlock(scope, inst.base.src); const bitcasted_ptr = try mod.addUnOp(b, inst.base.src, ptr_ty, .bitcast, ptr); return mod.storePtr(scope, inst.base.src, bitcasted_ptr, value); } -fn analyzeInstSetEvalBranchQuota( +fn zirSetEvalBranchQuota( mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, @@ -499,15 +496,16 @@ fn analyzeInstSetEvalBranchQuota( return mod.constVoid(scope, inst.base.src); } -fn analyzeInstStore(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirStore(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); + const ptr = try resolveInst(mod, scope, inst.positionals.lhs); const value = try resolveInst(mod, scope, inst.positionals.rhs); return mod.storePtr(scope, inst.base.src, ptr, value); } -fn analyzeInstParamType(mod: *Module, scope: *Scope, inst: *zir.Inst.ParamType) InnerError!*Inst { +fn zirParamType(mod: *Module, scope: *Scope, inst: *zir.Inst.ParamType) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const fn_inst = try resolveInst(mod, scope, inst.positionals.func); @@ -516,7 +514,7 @@ fn analyzeInstParamType(mod: *Module, scope: *Scope, inst: *zir.Inst.ParamType) const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { .Fn => fn_inst.ty, .BoundFn => { - return mod.fail(scope, fn_inst.src, "TODO implement analyzeInstParamType for method call syntax", .{}); + return mod.fail(scope, fn_inst.src, "TODO implement zirParamType for method call syntax", .{}); }, else => { return mod.fail(scope, fn_inst.src, "expected function, found '{}'", .{fn_inst.ty}); @@ -538,7 +536,7 @@ fn analyzeInstParamType(mod: *Module, scope: *Scope, inst: *zir.Inst.ParamType) return mod.constType(scope, inst.base.src, param_type); } -fn analyzeInstStr(mod: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerError!*Inst { +fn zirStr(mod: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); // The bytes references memory inside the ZIR module, which can get deallocated @@ -557,14 +555,14 @@ fn analyzeInstStr(mod: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerErr return mod.analyzeDeclRef(scope, str_inst.base.src, new_decl); } -fn analyzeInstInt(mod: *Module, scope: *Scope, inst: *zir.Inst.Int) InnerError!*Inst { +fn zirInt(mod: *Module, scope: *Scope, inst: *zir.Inst.Int) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); return mod.constIntBig(scope, inst.base.src, Type.initTag(.comptime_int), inst.positionals.int); } -fn analyzeInstExport(mod: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst { +fn zirExport(mod: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const symbol_name = try resolveConstString(mod, scope, export_inst.positionals.symbol_name); @@ -574,14 +572,14 @@ fn analyzeInstExport(mod: *Module, scope: *Scope, export_inst: *zir.Inst.Export) return mod.constVoid(scope, export_inst.base.src); } -fn analyzeInstCompileError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirCompileError(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const msg = try resolveConstString(mod, scope, inst.positionals.operand); return mod.fail(scope, inst.base.src, "{s}", .{msg}); } -fn analyzeInstCompileLog(mod: 
*Module, scope: *Scope, inst: *zir.Inst.CompileLog) InnerError!*Inst { +fn zirCompileLog(mod: *Module, scope: *Scope, inst: *zir.Inst.CompileLog) InnerError!*Inst { var managed = mod.compile_log_text.toManaged(mod.gpa); defer mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -608,7 +606,7 @@ fn analyzeInstCompileLog(mod: *Module, scope: *Scope, inst: *zir.Inst.CompileLog return mod.constVoid(scope, inst.base.src); } -fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*Inst { +fn zirArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const b = try mod.requireFunctionBlock(scope, inst.base.src); @@ -631,7 +629,7 @@ fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!* return mod.addArg(b, inst.base.src, param_type, name); } -fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError!*Inst { +fn zirLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const parent_block = scope.cast(Scope.Block).?; @@ -672,7 +670,7 @@ fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError return &loop_inst.base; } -fn analyzeInstBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: bool) InnerError!*Inst { +fn zirBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const parent_block = scope.cast(Scope.Block).?; @@ -704,9 +702,15 @@ fn analyzeInstBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_c return resolveInst(mod, scope, last_zir_inst); } -fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: bool) InnerError!*Inst { +fn zirBlock( + mod: *Module, + scope: *Scope, + inst: *zir.Inst.Block, + is_comptime: bool, +) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); + const parent_block = scope.cast(Scope.Block).?; // Reserve space for a Block instruction so that generated Break instructions can @@ -798,30 +802,52 @@ fn analyzeBlockBody( return &merges.block_inst.base; } -fn analyzeInstBreakpoint(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { +fn zirBreakpoint(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const b = try mod.requireRuntimeBlock(scope, inst.base.src); return mod.addNoOp(b, inst.base.src, Type.initTag(.void), .breakpoint); } -fn analyzeInstBreak(mod: *Module, scope: *Scope, inst: *zir.Inst.Break) InnerError!*Inst { +fn zirBreak(mod: *Module, scope: *Scope, inst: *zir.Inst.Break) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); + const operand = try resolveInst(mod, scope, inst.positionals.operand); const block = inst.positionals.block; return analyzeBreak(mod, scope, inst.base.src, block, operand); } -fn analyzeInstBreakVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.BreakVoid) InnerError!*Inst { +fn zirBreakVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.BreakVoid) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); + const block = inst.positionals.block; const void_inst = try mod.constVoid(scope, inst.base.src); return analyzeBreak(mod, scope, inst.base.src, block, void_inst); } -fn analyzeInstDbgStmt(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { +fn analyzeBreak( + mod: *Module, + scope: 
*Scope, + src: usize, + zir_block: *zir.Inst.Block, + operand: *Inst, +) InnerError!*Inst { + var opt_block = scope.cast(Scope.Block); + while (opt_block) |block| { + if (block.label) |*label| { + if (label.zir_block == zir_block) { + try label.merges.results.append(mod.gpa, operand); + const b = try mod.requireFunctionBlock(scope, src); + return mod.addBr(b, src, label.merges.block_inst, operand); + } + } + opt_block = block.parent; + } else unreachable; +} + +fn zirDbgStmt(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); if (scope.cast(Scope.Block)) |b| { @@ -832,26 +858,26 @@ fn analyzeInstDbgStmt(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerEr return mod.constVoid(scope, inst.base.src); } -fn analyzeInstDeclRefStr(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRefStr) InnerError!*Inst { +fn zirDeclRefStr(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRefStr) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const decl_name = try resolveConstString(mod, scope, inst.positionals.name); return mod.analyzeDeclRefByName(scope, inst.base.src, decl_name); } -fn declRef(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRef) InnerError!*Inst { +fn zirDeclRef(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRef) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); return mod.analyzeDeclRef(scope, inst.base.src, inst.positionals.decl); } -fn declVal(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerError!*Inst { +fn zirDeclVal(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); return mod.analyzeDeclVal(scope, inst.base.src, inst.positionals.decl); } -fn call(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst { +fn zirCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1002,7 +1028,7 @@ fn call(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst { return mod.addCall(b, inst.base.src, ret_type, func, casted_args); } -fn analyzeInstFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst { +fn zirFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const fn_type = try resolveType(mod, scope, fn_inst.positionals.fn_type); @@ -1019,13 +1045,13 @@ fn analyzeInstFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError! 
}); } -fn analyzeInstIntType(mod: *Module, scope: *Scope, inttype: *zir.Inst.IntType) InnerError!*Inst { +fn zirIntType(mod: *Module, scope: *Scope, inttype: *zir.Inst.IntType) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); return mod.fail(scope, inttype.base.src, "TODO implement inttype", .{}); } -fn analyzeInstOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp) InnerError!*Inst { +fn zirOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const child_type = try resolveType(mod, scope, optional.positionals.operand); @@ -1033,7 +1059,7 @@ fn analyzeInstOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp return mod.constType(scope, optional.base.src, try mod.optionalType(scope, child_type)); } -fn analyzeInstArrayType(mod: *Module, scope: *Scope, array: *zir.Inst.BinOp) InnerError!*Inst { +fn zirArrayType(mod: *Module, scope: *Scope, array: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); // TODO these should be lazily evaluated @@ -1043,7 +1069,7 @@ fn analyzeInstArrayType(mod: *Module, scope: *Scope, array: *zir.Inst.BinOp) Inn return mod.constType(scope, array.base.src, try mod.arrayType(scope, len.val.toUnsignedInt(), null, elem_type)); } -fn analyzeInstArrayTypeSentinel(mod: *Module, scope: *Scope, array: *zir.Inst.ArrayTypeSentinel) InnerError!*Inst { +fn zirArrayTypeSentinel(mod: *Module, scope: *Scope, array: *zir.Inst.ArrayTypeSentinel) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); // TODO these should be lazily evaluated @@ -1054,7 +1080,7 @@ fn analyzeInstArrayTypeSentinel(mod: *Module, scope: *Scope, array: *zir.Inst.Ar return mod.constType(scope, array.base.src, try mod.arrayType(scope, len.val.toUnsignedInt(), sentinel.val, elem_type)); } -fn analyzeInstErrorUnionType(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirErrorUnionType(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const error_union = try resolveType(mod, scope, inst.positionals.lhs); @@ -1067,7 +1093,7 @@ fn analyzeInstErrorUnionType(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) return mod.constType(scope, inst.base.src, try mod.errorUnionType(scope, error_union, payload)); } -fn analyzeInstAnyframeType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirAnyframeType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const return_type = try resolveType(mod, scope, inst.positionals.operand); @@ -1075,7 +1101,7 @@ fn analyzeInstAnyframeType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) In return mod.constType(scope, inst.base.src, try mod.anyframeType(scope, return_type)); } -fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) InnerError!*Inst { +fn zirErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); // The declarations arena will store the hashmap. 
@@ -1107,13 +1133,13 @@ fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) In return mod.analyzeDeclVal(scope, inst.base.src, new_decl); } -fn analyzeInstMergeErrorSets(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirMergeErrorSets(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); return mod.fail(scope, inst.base.src, "TODO implement merge_error_sets", .{}); } -fn analyzeInstEnumLiteral(mod: *Module, scope: *Scope, inst: *zir.Inst.EnumLiteral) InnerError!*Inst { +fn zirEnumLiteral(mod: *Module, scope: *Scope, inst: *zir.Inst.EnumLiteral) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const duped_name = try scope.arena().dupe(u8, inst.positionals.name); @@ -1124,7 +1150,7 @@ fn analyzeInstEnumLiteral(mod: *Module, scope: *Scope, inst: *zir.Inst.EnumLiter } /// Pointer in, pointer out. -fn optionalPayloadPtr( +fn zirOptionalPayloadPtr( mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, @@ -1165,7 +1191,7 @@ fn optionalPayloadPtr( } /// Value in, value out. -fn optionalPayload( +fn zirOptionalPayload( mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, @@ -1201,40 +1227,40 @@ fn optionalPayload( } /// Value in, value out -fn errorUnionPayload(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { +fn zirErrUnionPayload(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionPayload", .{}); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionPayload", .{}); } /// Pointer in, pointer out -fn errorUnionPayloadPtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { +fn zirErrUnionPayloadPtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionPayloadPtr", .{}); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionPayloadPtr", .{}); } /// Value in, value out -fn errorUnionCode(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { +fn zirErrUnionCode(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionCode", .{}); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionCode", .{}); } /// Pointer in, value out -fn errorUnionCodePtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { +fn zirErrUnionCodePtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.errorUnionCodePtr", .{}); + return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionCodePtr", .{}); } -fn analyzeInstEnsureErrPayloadVoid(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { +fn zirEnsureErrPayloadVoid(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, unwrap.base.src, "TODO implement analyzeInstEnsureErrPayloadVoid", .{}); + return mod.fail(scope, unwrap.base.src, "TODO 
implement zirEnsureErrPayloadVoid", .{}); } -fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst { +fn zirFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const return_type = try resolveType(mod, scope, fntype.positionals.return_type); @@ -1277,13 +1303,13 @@ fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) Inne return mod.constType(scope, fntype.base.src, fn_ty); } -fn analyzeInstPrimitive(mod: *Module, scope: *Scope, primitive: *zir.Inst.Primitive) InnerError!*Inst { +fn zirPrimitive(mod: *Module, scope: *Scope, primitive: *zir.Inst.Primitive) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); return mod.constInst(scope, primitive.base.src, primitive.positionals.tag.toTypedValue()); } -fn analyzeInstAs(mod: *Module, scope: *Scope, as: *zir.Inst.BinOp) InnerError!*Inst { +fn zirAs(mod: *Module, scope: *Scope, as: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const dest_type = try resolveType(mod, scope, as.positionals.lhs); @@ -1291,7 +1317,7 @@ fn analyzeInstAs(mod: *Module, scope: *Scope, as: *zir.Inst.BinOp) InnerError!*I return mod.coerce(scope, dest_type, new_inst); } -fn analyzeInstPtrToInt(mod: *Module, scope: *Scope, ptrtoint: *zir.Inst.UnOp) InnerError!*Inst { +fn zirPtrtoint(mod: *Module, scope: *Scope, ptrtoint: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const ptr = try resolveInst(mod, scope, ptrtoint.positionals.operand); @@ -1304,7 +1330,7 @@ fn analyzeInstPtrToInt(mod: *Module, scope: *Scope, ptrtoint: *zir.Inst.UnOp) In return mod.addUnOp(b, ptrtoint.base.src, ty, .ptrtoint, ptr); } -fn fieldVal(mod: *Module, scope: *Scope, inst: *zir.Inst.Field) InnerError!*Inst { +fn zirFieldVal(mod: *Module, scope: *Scope, inst: *zir.Inst.Field) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1315,7 +1341,7 @@ fn fieldVal(mod: *Module, scope: *Scope, inst: *zir.Inst.Field) InnerError!*Inst return mod.analyzeDeref(scope, inst.base.src, result_ptr, result_ptr.src); } -fn fieldPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.Field) InnerError!*Inst { +fn zirFieldPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.Field) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1324,7 +1350,7 @@ fn fieldPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.Field) InnerError!*Inst return mod.namedFieldPtr(scope, inst.base.src, object_ptr, field_name, inst.base.src); } -fn fieldValNamed(mod: *Module, scope: *Scope, inst: *zir.Inst.FieldNamed) InnerError!*Inst { +fn zirFieldValNamed(mod: *Module, scope: *Scope, inst: *zir.Inst.FieldNamed) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1336,7 +1362,7 @@ fn fieldValNamed(mod: *Module, scope: *Scope, inst: *zir.Inst.FieldNamed) InnerE return mod.analyzeDeref(scope, inst.base.src, result_ptr, result_ptr.src); } -fn fieldPtrNamed(mod: *Module, scope: *Scope, inst: *zir.Inst.FieldNamed) InnerError!*Inst { +fn zirFieldPtrNamed(mod: *Module, scope: *Scope, inst: *zir.Inst.FieldNamed) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1346,7 +1372,7 @@ fn fieldPtrNamed(mod: *Module, scope: *Scope, inst: *zir.Inst.FieldNamed) InnerE return mod.namedFieldPtr(scope, inst.base.src, object_ptr, field_name, fsrc); } -fn analyzeInstIntCast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirIntcast(mod: *Module, scope: 
*Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const dest_type = try resolveType(mod, scope, inst.positionals.lhs); @@ -1384,7 +1410,7 @@ fn analyzeInstIntCast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerE return mod.fail(scope, inst.base.src, "TODO implement analyze widen or shorten int", .{}); } -fn analyzeInstBitCast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirBitcast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const dest_type = try resolveType(mod, scope, inst.positionals.lhs); @@ -1392,7 +1418,7 @@ fn analyzeInstBitCast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerE return mod.bitcast(scope, dest_type, operand); } -fn analyzeInstFloatCast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirFloatcast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const dest_type = try resolveType(mod, scope, inst.positionals.lhs); @@ -1430,7 +1456,7 @@ fn analyzeInstFloatCast(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inne return mod.fail(scope, inst.base.src, "TODO implement analyze widen or shorten float", .{}); } -fn elemVal(mod: *Module, scope: *Scope, inst: *zir.Inst.Elem) InnerError!*Inst { +fn zirElemVal(mod: *Module, scope: *Scope, inst: *zir.Inst.Elem) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1441,7 +1467,7 @@ fn elemVal(mod: *Module, scope: *Scope, inst: *zir.Inst.Elem) InnerError!*Inst { return mod.analyzeDeref(scope, inst.base.src, result_ptr, result_ptr.src); } -fn elemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.Elem) InnerError!*Inst { +fn zirElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.Elem) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1450,7 +1476,7 @@ fn elemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.Elem) InnerError!*Inst { return mod.elemPtr(scope, inst.base.src, array_ptr, elem_index); } -fn analyzeInstSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerError!*Inst { +fn zirSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const array_ptr = try resolveInst(mod, scope, inst.positionals.array_ptr); @@ -1461,7 +1487,7 @@ fn analyzeInstSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerErr return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, end, sentinel); } -fn analyzeInstSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const array_ptr = try resolveInst(mod, scope, inst.positionals.lhs); @@ -1470,7 +1496,7 @@ fn analyzeInstSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inn return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, null, null); } -fn analyzeInstSwitchRange(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirSwitchRange(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const start = try resolveInst(mod, scope, inst.positionals.lhs); @@ -1494,7 +1520,7 @@ fn analyzeInstSwitchRange(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) In return mod.constVoid(scope, inst.base.src); } -fn analyzeInstSwitchBr(mod: *Module, scope: 
*Scope, inst: *zir.Inst.SwitchBr) InnerError!*Inst { +fn zirSwitchbr(mod: *Module, scope: *Scope, inst: *zir.Inst.SwitchBr) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const target_ptr = try resolveInst(mod, scope, inst.positionals.target_ptr); @@ -1698,7 +1724,7 @@ fn validateSwitch(mod: *Module, scope: *Scope, target: *Inst, inst: *zir.Inst.Sw } } -fn analyzeInstImport(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirImport(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveConstString(mod, scope, inst.positionals.operand); @@ -1718,19 +1744,19 @@ fn analyzeInstImport(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerErr return mod.constType(scope, inst.base.src, file_scope.root_container.ty); } -fn analyzeInstShl(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirShl(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstShl", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirShl", .{}); } -fn analyzeInstShr(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirShr(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstShr", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirShr", .{}); } -fn analyzeInstBitwise(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirBitwise(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1784,8 +1810,8 @@ fn analyzeInstBitwise(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerE const b = try mod.requireRuntimeBlock(scope, inst.base.src); const ir_tag = switch (inst.base.tag) { - .bitand => Inst.Tag.bitand, - .bitor => Inst.Tag.bitor, + .bit_and => Inst.Tag.bit_and, + .bit_or => Inst.Tag.bit_or, .xor => Inst.Tag.xor, else => unreachable, }; @@ -1793,25 +1819,25 @@ fn analyzeInstBitwise(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerE return mod.addBinOp(b, inst.base.src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn analyzeInstBitNot(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirBitNot(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstBitNot", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirBitNot", .{}); } -fn analyzeInstArrayCat(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirArrayCat(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstArrayCat", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirArrayCat", .{}); } -fn analyzeInstArrayMul(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirArrayMul(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstArrayMul", .{}); + return mod.fail(scope, inst.base.src, "TODO implement zirArrayMul", .{}); 
} -fn analyzeInstArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -1912,14 +1938,14 @@ fn analyzeInstComptimeOp(mod: *Module, scope: *Scope, res_type: Type, inst: *zir }); } -fn analyzeInstDeref(mod: *Module, scope: *Scope, deref: *zir.Inst.UnOp) InnerError!*Inst { +fn zirDeref(mod: *Module, scope: *Scope, deref: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const ptr = try resolveInst(mod, scope, deref.positionals.operand); return mod.analyzeDeref(scope, deref.base.src, ptr, deref.positionals.operand.src); } -fn analyzeInstAsm(mod: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerError!*Inst { +fn zirAsm(mod: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const return_type = try resolveType(mod, scope, assembly.positionals.return_type); @@ -1960,7 +1986,7 @@ fn analyzeInstAsm(mod: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerErr return &inst.base; } -fn analyzeInstCmp( +fn zirCmp( mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp, @@ -2018,14 +2044,14 @@ fn analyzeInstCmp( return mod.fail(scope, inst.base.src, "TODO implement more cmp analysis", .{}); } -fn analyzeInstTypeOf(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirTypeof(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); return mod.constType(scope, inst.base.src, operand.ty); } -fn analyzeInstTypeOfPeer(mod: *Module, scope: *Scope, inst: *zir.Inst.TypeOfPeer) InnerError!*Inst { +fn zirTypeofPeer(mod: *Module, scope: *Scope, inst: *zir.Inst.TypeOfPeer) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); var insts_to_res = try mod.gpa.alloc(*ir.Inst, inst.positionals.items.len); @@ -2037,7 +2063,7 @@ fn analyzeInstTypeOfPeer(mod: *Module, scope: *Scope, inst: *zir.Inst.TypeOfPeer return mod.constType(scope, inst.base.src, pt_res); } -fn analyzeInstBoolNot(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirBoolNot(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const uncasted_operand = try resolveInst(mod, scope, inst.positionals.operand); @@ -2050,7 +2076,7 @@ fn analyzeInstBoolNot(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerEr return mod.addUnOp(b, inst.base.src, bool_type, .not, operand); } -fn analyzeInstBoolOp(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { +fn zirBoolOp(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const bool_type = Type.initTag(.bool); @@ -2059,7 +2085,7 @@ fn analyzeInstBoolOp(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerEr const uncasted_rhs = try resolveInst(mod, scope, inst.positionals.rhs); const rhs = try mod.coerce(scope, bool_type, uncasted_rhs); - const is_bool_or = inst.base.tag == .boolor; + const is_bool_or = inst.base.tag == .bool_or; if (lhs.value()) |lhs_val| { if (rhs.value()) |rhs_val| { @@ -2071,17 +2097,17 @@ fn analyzeInstBoolOp(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerEr } } const b = try mod.requireRuntimeBlock(scope, inst.base.src); - return mod.addBinOp(b, inst.base.src, bool_type, if 
(is_bool_or) .boolor else .booland, lhs, rhs); + return mod.addBinOp(b, inst.base.src, bool_type, if (is_bool_or) .bool_or else .bool_and, lhs, rhs); } -fn isNull(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { +fn zirIsNull(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); return mod.analyzeIsNull(scope, inst.base.src, operand, invert_logic); } -fn isNullPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { +fn zirIsNullPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const ptr = try resolveInst(mod, scope, inst.positionals.operand); @@ -2089,14 +2115,14 @@ fn isNullPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bo return mod.analyzeIsNull(scope, inst.base.src, loaded, invert_logic); } -fn isErr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirIsErr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); return mod.analyzeIsErr(scope, inst.base.src, operand); } -fn isErrPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirIsErrPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const ptr = try resolveInst(mod, scope, inst.positionals.operand); @@ -2104,7 +2130,7 @@ fn isErrPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst return mod.analyzeIsErr(scope, inst.base.src, loaded); } -fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerError!*Inst { +fn zirCondbr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const uncasted_cond = try resolveInst(mod, scope, inst.positionals.condition); @@ -2153,7 +2179,7 @@ fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerE return mod.addCondBr(parent_block, inst.base.src, cond, then_body, else_body); } -fn analyzeInstUnreachable( +fn zirUnreachable( mod: *Module, scope: *Scope, unreach: *zir.Inst.NoOp, @@ -2170,7 +2196,7 @@ fn analyzeInstUnreachable( } } -fn analyzeInstRet(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { +fn zirReturn(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const operand = try resolveInst(mod, scope, inst.positionals.operand); @@ -2185,7 +2211,7 @@ fn analyzeInstRet(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError! 
return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, operand); } -fn analyzeInstRetVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { +fn zirReturnVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const b = try mod.requireFunctionBlock(scope, inst.base.src); @@ -2216,27 +2242,7 @@ fn floatOpAllowed(tag: zir.Inst.Tag) bool { }; } -fn analyzeBreak( - mod: *Module, - scope: *Scope, - src: usize, - zir_block: *zir.Inst.Block, - operand: *Inst, -) InnerError!*Inst { - var opt_block = scope.cast(Scope.Block); - while (opt_block) |block| { - if (block.label) |*label| { - if (label.zir_block == zir_block) { - try label.merges.results.append(mod.gpa, operand); - const b = try mod.requireFunctionBlock(scope, src); - return mod.addBr(b, src, label.merges.block_inst, operand); - } - } - opt_block = block.parent; - } else unreachable; -} - -fn analyzeInstSimplePtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) InnerError!*Inst { +fn zirSimplePtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const elem_type = try resolveType(mod, scope, inst.positionals.operand); @@ -2244,7 +2250,7 @@ fn analyzeInstSimplePtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, m return mod.constType(scope, inst.base.src, ty); } -fn analyzeInstPtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.PtrType) InnerError!*Inst { +fn zirPtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.PtrType) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); // TODO lazy values -- cgit v1.2.3 From 588171c30b34426fbb07645aa2625e989f369eec Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 22 Jan 2021 16:45:09 -0700 Subject: sema: after block gets peer type resolved, insert type coercions on the break instruction operands. This involves a new TZIR instruction, br_block_flat, which represents a break instruction where the operand is the result of a flat block. See the doc comments on the instructions for more details. How it works: when adding break instructions in semantic analysis, the underlying allocation is slightly padded so that it is the size of a br_block_flat instruction, which allows the break instruction to later be converted without removing instructions inside the parent body. The extra type coercion instructions go into the body of the br_block_flat, and backends are responsible for dispatching the instruction correctly (it should map to the same function calls for related instructions). --- src/Module.zig | 28 +++++++++++++++-- src/codegen.zig | 31 ++++++++++++------- src/ir.zig | 32 +++++++++++++++++++- src/zir.zig | 28 +++++++++++++++-- src/zir_sema.zig | 91 ++++++++++++++++++++++++++++++++++++++++++-------------- 5 files changed, 171 insertions(+), 39 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Module.zig b/src/Module.zig index 2dc84a93a9..b7967aacc5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -671,14 +671,36 @@ pub const Scope = struct { }; pub const Merges = struct { - results: ArrayListUnmanaged(*Inst), block_inst: *Inst.Block, + /// Separate array list from br_list so that it can be passed directly + /// to resolvePeerTypes.
+ results: ArrayListUnmanaged(*Inst), + /// Keeps track of the break instructions so that the operand can be replaced + /// if we need to add type coercion at the end of block analysis. + /// Same indexes, capacity, length as `results`. + br_list: ArrayListUnmanaged(*Inst.Br), }; /// For debugging purposes. pub fn dump(self: *Block, mod: Module) void { zir.dumpBlock(mod, self); } + + pub fn makeSubBlock(parent: *Block) Block { + return .{ + .parent = parent, + .inst_table = parent.inst_table, + .func = parent.func, + .owner_decl = parent.owner_decl, + .src_decl = parent.src_decl, + .instructions = .{}, + .arena = parent.arena, + .label = null, + .inlining = parent.inlining, + .is_comptime = parent.is_comptime, + .branch_quota = parent.branch_quota, + }; + } }; /// This is a temporary structure, references to it are valid only @@ -2107,7 +2129,7 @@ pub fn addBr( src: usize, target_block: *Inst.Block, operand: *Inst, -) !*Inst { +) !*Inst.Br { const inst = try scope_block.arena.create(Inst.Br); inst.* = .{ .base = .{ @@ -2119,7 +2141,7 @@ pub fn addBr( .block = target_block, }; try scope_block.instructions.append(self.gpa, &inst.base); - return &inst.base; + return inst; } pub fn addCondBr( diff --git a/src/codegen.zig b/src/codegen.zig index a7b067f7e1..1f5aad8ab8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -844,6 +844,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), .block => return self.genBlock(inst.castTag(.block).?), .br => return self.genBr(inst.castTag(.br).?), + .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), .breakpoint => return self.genBreakpoint(inst.src), .brvoid => return self.genBrVoid(inst.castTag(.brvoid).?), .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), @@ -2441,17 +2442,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + fn genBrBlockFlat(self: *Self, parent_inst: *ir.Inst.BrBlockFlat) !MCValue { + try self.genBody(parent_inst.body); + const last = parent_inst.body.instructions[parent_inst.body.instructions.len - 1]; + return self.br(parent_inst.base.src, parent_inst.block, last); + } + fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue { - if (inst.operand.ty.hasCodeGenBits()) { - const operand = try self.resolveInst(inst.operand); - const block_mcv = @bitCast(MCValue, inst.block.codegen.mcv); - if (block_mcv == .none) { - inst.block.codegen.mcv = @bitCast(AnyMCValue, operand); - } else { - try self.setRegOrMem(inst.base.src, inst.block.base.ty, block_mcv, operand); - } - } - return self.brVoid(inst.base.src, inst.block); + return self.br(inst.base.src, inst.block, inst.operand); } fn genBrVoid(self: *Self, inst: *ir.Inst.BrVoid) !MCValue { @@ -2478,6 +2476,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + fn br(self: *Self, src: usize, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue { + if (operand.ty.hasCodeGenBits()) { + const operand_mcv = try self.resolveInst(operand); + const block_mcv = @bitCast(MCValue, block.codegen.mcv); + if (block_mcv == .none) { + block.codegen.mcv = @bitCast(AnyMCValue, operand_mcv); + } else { + try self.setRegOrMem(src, block.base.ty, block_mcv, operand_mcv); + } + } + return self.brVoid(src, block); + } + fn brVoid(self: *Self, src: usize, block: *ir.Inst.Block) !MCValue { // Emit a jump with a relocation. It will be patched up after the block ends. 
try block.codegen.relocs.ensureCapacity(self.gpa, block.codegen.relocs.items.len + 1); diff --git a/src/ir.zig b/src/ir.zig index b1147871f4..4d421dda4c 100644 --- a/src/ir.zig +++ b/src/ir.zig @@ -61,6 +61,13 @@ pub const Inst = struct { bit_or, block, br, + /// Same as `br` except the operand is a list of instructions to be treated as + /// a flat block; that is there is only 1 break instruction from the block, and + /// it is implied to be after the last instruction, and the last instruction is + /// the break operand. + /// This instruction exists for late-stage semantic analysis patch ups, to + /// replace one br operand with multiple instructions, without moving anything else around. + br_block_flat, breakpoint, brvoid, call, @@ -158,6 +165,7 @@ pub const Inst = struct { .assembly => Assembly, .block => Block, .br => Br, + .br_block_flat => BrBlockFlat, .brvoid => BrVoid, .call => Call, .condbr => CondBr, @@ -252,6 +260,7 @@ pub const Inst = struct { return switch (base.tag) { .br => base.castTag(.br).?.block, .brvoid => base.castTag(.brvoid).?.block, + .br_block_flat => base.castTag(.br_block_flat).?.block, else => null, }; } @@ -355,6 +364,27 @@ pub const Inst = struct { } }; + pub const convertable_br_size = std.math.max(@sizeOf(BrBlockFlat), @sizeOf(Br)); + pub const convertable_br_align = std.math.max(@alignOf(BrBlockFlat), @alignOf(Br)); + comptime { + assert(@byteOffsetOf(BrBlockFlat, "base") == @byteOffsetOf(Br, "base")); + } + + pub const BrBlockFlat = struct { + pub const base_tag = Tag.br_block_flat; + + base: Inst, + block: *Block, + body: Body, + + pub fn operandCount(self: *const BrBlockFlat) usize { + return 0; + } + pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst { + return null; + } + }; + pub const Br = struct { pub const base_tag = Tag.br; @@ -363,7 +393,7 @@ pub const Inst = struct { operand: *Inst, pub fn operandCount(self: *const Br) usize { - return 0; + return 1; } pub fn getOperand(self: *const Br, index: usize) ?*Inst { if (index == 0) diff --git a/src/zir.zig b/src/zir.zig index 651e5ee3dc..301b52efc0 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -1634,6 +1634,12 @@ const DumpTzir = struct { try dtz.findConst(br.operand); }, + .br_block_flat => { + const br_block_flat = inst.castTag(.br_block_flat).?; + try dtz.findConst(&br_block_flat.block.base); + try dtz.fetchInstsAndResolveConsts(br_block_flat.body); + }, + .brvoid => { const brvoid = inst.castTag(.brvoid).?; try dtz.findConst(&brvoid.block.base); @@ -1779,6 +1785,24 @@ const DumpTzir = struct { } }, + .br_block_flat => { + const br_block_flat = inst.castTag(.br_block_flat).?; + const block_kinky = try dtz.writeInst(writer, &br_block_flat.block.base); + if (block_kinky != null) { + try writer.writeAll(", { // Instruction does not dominate all uses!\n"); + } else { + try writer.writeAll(", {\n"); + } + + const old_indent = dtz.indent; + dtz.indent += 2; + try dtz.dumpBody(br_block_flat.body, writer); + dtz.indent = old_indent; + + try writer.writeByteNTimes(' ', dtz.indent); + try writer.writeAll("})\n"); + }, + .brvoid => { const brvoid = inst.castTag(.brvoid).?; const kinky = try dtz.writeInst(writer, &brvoid.block.base); @@ -1792,7 +1816,7 @@ const DumpTzir = struct { .block => { const block = inst.castTag(.block).?; - try writer.writeAll("\n"); + try writer.writeAll("{\n"); const old_indent = dtz.indent; dtz.indent += 2; @@ -1800,7 +1824,7 @@ const DumpTzir = struct { dtz.indent = old_indent; try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll(")\n"); + try 
writer.writeAll("})\n"); }, .condbr => { diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 40ea563bb6..0297b9873a 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -664,20 +664,9 @@ fn zirBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: defer tracy.end(); const parent_block = scope.cast(Scope.Block).?; - var child_block: Scope.Block = .{ - .parent = parent_block, - .inst_table = parent_block.inst_table, - .func = parent_block.func, - .owner_decl = parent_block.owner_decl, - .src_decl = parent_block.src_decl, - .instructions = .{}, - .arena = parent_block.arena, - .label = null, - .inlining = parent_block.inlining, - .is_comptime = parent_block.is_comptime or is_comptime, - .branch_quota = parent_block.branch_quota, - }; + var child_block = parent_block.makeSubBlock(); defer child_block.instructions.deinit(mod.gpa); + child_block.is_comptime = child_block.is_comptime or is_comptime; try analyzeBody(mod, &child_block, inst.positionals.body); @@ -728,6 +717,7 @@ fn zirBlock( .zir_block = inst, .merges = .{ .results = .{}, + .br_list = .{}, .block_inst = block_inst, }, }), @@ -739,6 +729,7 @@ fn zirBlock( defer child_block.instructions.deinit(mod.gpa); defer merges.results.deinit(mod.gpa); + defer merges.br_list.deinit(mod.gpa); try analyzeBody(mod, &child_block, inst.positionals.body); @@ -772,22 +763,53 @@ fn analyzeBlockBody( const last_inst = child_block.instructions.items[last_inst_index]; if (last_inst.breakBlock()) |br_block| { if (br_block == merges.block_inst) { - // No need for a block instruction. We can put the new instructions directly into the parent block. - // Here we omit the break instruction. + // No need for a block instruction. We can put the new instructions directly + // into the parent block. Here we omit the break instruction. const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]); try parent_block.instructions.appendSlice(mod.gpa, copied_instructions); return merges.results.items[0]; } } } - // It should be impossible to have the number of results be > 1 in a comptime scope. - assert(!child_block.is_comptime); // We should have already got a compile error in the condbr condition. + // It is impossible to have the number of results be > 1 in a comptime scope. + assert(!child_block.is_comptime); // Should already got a compile error in the condbr condition. // Need to set the type and emit the Block instruction. This allows machine code generation // to emit a jump instruction to after the block when it encounters the break. try parent_block.instructions.append(mod.gpa, &merges.block_inst.base); - merges.block_inst.base.ty = try mod.resolvePeerTypes(scope, merges.results.items); - merges.block_inst.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) }; + const resolved_ty = try mod.resolvePeerTypes(scope, merges.results.items); + merges.block_inst.base.ty = resolved_ty; + merges.block_inst.body = .{ + .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items), + }; + // Now that the block has its type resolved, we need to go back into all the break + // instructions, and insert type coercion on the operands. + for (merges.br_list.items) |br| { + if (br.operand.ty.eql(resolved_ty)) { + // No type coercion needed. 
+ continue; + } + var coerce_block = parent_block.makeSubBlock(); + defer coerce_block.instructions.deinit(mod.gpa); + const coerced_operand = try mod.coerce(&coerce_block.base, resolved_ty, br.operand); + assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == coerced_operand); + // Here we depend on the br instruction having been over-allocated (if necessary) + // inside analyzeBreak so that it can be converted into a br_block_flat instruction. + const br_src = br.base.src; + const br_ty = br.base.ty; + const br_block_flat = @ptrCast(*Inst.BrBlockFlat, br); + br_block_flat.* = .{ + .base = .{ + .src = br_src, + .ty = br_ty, + .tag = .br_block_flat, + }, + .block = merges.block_inst, + .body = .{ + .instructions = try parent_block.arena.dupe(*Inst, coerce_block.instructions.items), + }, + }; + } return &merges.block_inst.base; } @@ -827,9 +849,28 @@ fn analyzeBreak( while (opt_block) |block| { if (block.label) |*label| { if (label.zir_block == zir_block) { - try label.merges.results.append(mod.gpa, operand); const b = try mod.requireFunctionBlock(scope, src); - return mod.addBr(b, src, label.merges.block_inst, operand); + // Here we add a br instruction, but we over-allocate a little bit + // (if necessary) to make it possible to convert the instruction into + // a br_block_flat instruction later. + const br = @ptrCast(*Inst.Br, try b.arena.alignedAlloc( + u8, + Inst.convertable_br_align, + Inst.convertable_br_size, + )); + br.* = .{ + .base = .{ + .tag = .br, + .ty = Type.initTag(.noreturn), + .src = src, + }, + .operand = operand, + .block = label.merges.block_inst, + }; + try b.instructions.append(mod.gpa, &br.base); + try label.merges.results.append(mod.gpa, operand); + try label.merges.br_list.append(mod.gpa, br); + return &br.base; } } opt_block = block.parent; @@ -980,6 +1021,7 @@ fn zirCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst { .casted_args = casted_args, .merges = .{ .results = .{}, + .br_list = .{}, .block_inst = block_inst, }, }; @@ -1004,6 +1046,7 @@ defer child_block.instructions.deinit(mod.gpa); defer merges.results.deinit(mod.gpa); + defer merges.br_list.deinit(mod.gpa); try mod.emitBackwardBranch(&child_block, inst.base.src); @@ -2194,7 +2237,8 @@ fn zirReturn(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst if (b.inlining) |inlining| { // We are inlining a function call; rewrite the `ret` as a `break`. try inlining.merges.results.append(mod.gpa, operand); - return mod.addBr(b, inst.base.src, inlining.merges.block_inst, operand); + const br = try mod.addBr(b, inst.base.src, inlining.merges.block_inst, operand); + return &br.base; } return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, operand); @@ -2208,7 +2252,8 @@ fn zirReturnVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!* // We are inlining a function call; rewrite the `retvoid` as a `breakvoid`.
const void_inst = try mod.constVoid(scope, inst.base.src); try inlining.merges.results.append(mod.gpa, void_inst); - return mod.addBr(b, inst.base.src, inlining.merges.block_inst, void_inst); + const br = try mod.addBr(b, inst.base.src, inlining.merges.block_inst, void_inst); + return &br.base; } if (b.func) |func| { -- cgit v1.2.3 From 6c8985fceeeb6314143691570cf0c7a42521e590 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 24 Jan 2021 20:23:37 -0700 Subject: astgen: rework labeled blocks --- src/Module.zig | 23 +++-- src/astgen.zig | 249 +++++++++++++++++++++++++++++++++++-------------------- src/codegen.zig | 10 +-- src/ir.zig | 8 +- src/zir.zig | 19 +++-- src/zir_sema.zig | 3 + 6 files changed, 200 insertions(+), 112 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Module.zig b/src/Module.zig index b7967aacc5..0bafc72e6b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -717,7 +717,7 @@ pub const Scope = struct { label: ?Label = null, break_block: ?*zir.Inst.Block = null, continue_block: ?*zir.Inst.Block = null, - /// only valid if label != null or (continue_block and break_block) != null + /// Only valid when setBlockResultLoc is called. break_result_loc: astgen.ResultLoc = undefined, /// When a block has a pointer result location, here it is. rl_ptr: ?*zir.Inst = null, @@ -726,6 +726,17 @@ pub const Scope = struct { /// whether to rely on break instructions or writing to the result /// pointer for the result instruction. rvalue_rl_count: usize = 0, + /// Keeps track of how many break instructions there are. When astgen is finished + /// with a block, it can check this against rvalue_rl_count to find out whether + /// the break instructions should be downgraded to break_void. + break_count: usize = 0, + /// Tracks `break :foo bar` instructions so they can possibly be elided later if + /// the labeled block ends up not needing a result location pointer. + labeled_breaks: std.ArrayListUnmanaged(*zir.Inst.Break) = .{}, + /// Tracks `store_to_block_ptr` instructions that correspond to break instructions + /// so they can possibly be elided later if the labeled block ends up not needing + /// a result location pointer. + labeled_store_to_block_ptr_list: std.ArrayListUnmanaged(*zir.Inst.BinOp) = .{}, pub const Label = struct { token: ast.TokenIndex, @@ -3495,18 +3506,18 @@ pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic }; const ok_body: ir.Body = .{ - .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the brvoid. + .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the br_void. }; - const brvoid = try parent_block.arena.create(Inst.BrVoid); - brvoid.* = .{ + const br_void = try parent_block.arena.create(Inst.BrVoid); + br_void.* = .{ .base = .{ - .tag = .brvoid, + .tag = .br_void, .ty = Type.initTag(.noreturn), .src = ok.src, }, .block = block_inst, }; - ok_body.instructions[0] = &brvoid.base; + ok_body.instructions[0] = &br_void.base; var fail_block: Scope.Block = .{ .parent = parent_block, diff --git a/src/astgen.zig b/src/astgen.zig index 49f60aa6ba..994205c3a7 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -38,6 +38,21 @@ pub const ResultLoc = union(enum) { /// is inferred based on peer type resolution for a `zir.Inst.Block`. /// The result instruction from the expression must be ignored. 
block_ptr: *Module.Scope.GenZIR, + + pub const Strategy = struct { + elide_store_to_block_ptr_instructions: bool, + tag: Tag, + + pub const Tag = enum { + /// Both branches will use break_void; result location is used to communicate the + /// result instruction. + break_void, + /// Use break statements to pass the block result value, and call rvalue() at + /// the end depending on rl. Also elide the store_to_block_ptr instructions + /// depending on rl. + break_operand, + }; + }; }; pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*zir.Inst { @@ -348,10 +363,11 @@ pub fn comptimeExpr(mod: *Module, parent_scope: *Scope, rl: ResultLoc, node: *as return &block.base; } -fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst { - if (true) { - @panic("TODO reimplement this"); - } +fn breakExpr( + mod: *Module, + parent_scope: *Scope, + node: *ast.Node.ControlFlowExpression, +) InnerError!*zir.Inst { const tree = parent_scope.tree(); const src = tree.token_locs[node.ltoken].start; @@ -377,25 +393,31 @@ fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpr continue; }; - if (node.getRHS()) |rhs| { - // Most result location types can be forwarded directly; however - // if we need to write to a pointer which has an inferred type, - // proper type inference requires peer type resolution on the block's - // break operand expressions. - const branch_rl: ResultLoc = switch (gen_zir.break_result_loc) { - .discard, .none, .ty, .ptr, .ref => gen_zir.break_result_loc, - .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block_inst }, - }; - const operand = try expr(mod, parent_scope, branch_rl, rhs); - return try addZIRInst(mod, parent_scope, src, zir.Inst.Break, .{ + const rhs = node.getRHS() orelse { + return addZirInstTag(mod, parent_scope, src, .break_void, .{ .block = block_inst, - .operand = operand, - }, .{}); - } else { - return try addZIRInst(mod, parent_scope, src, zir.Inst.BreakVoid, .{ - .block = block_inst, - }, .{}); + }); + }; + gen_zir.break_count += 1; + const prev_rvalue_rl_count = gen_zir.rvalue_rl_count; + const operand = try expr(mod, parent_scope, gen_zir.break_result_loc, rhs); + const have_store_to_block = gen_zir.rvalue_rl_count != prev_rvalue_rl_count; + const br = try addZirInstTag(mod, parent_scope, src, .@"break", .{ + .block = block_inst, + .operand = operand, + }); + if (gen_zir.break_result_loc == .block_ptr) { + try gen_zir.labeled_breaks.append(mod.gpa, br.castTag(.@"break").?); + + if (have_store_to_block) { + const inst_list = parent_scope.cast(Scope.GenZIR).?.instructions.items; + const last_inst = inst_list[inst_list.len - 2]; + const store_inst = last_inst.castTag(.store_to_block_ptr).?; + assert(store_inst.positionals.lhs == gen_zir.rl_ptr.?); + try gen_zir.labeled_store_to_block_ptr_list.append(mod.gpa, store_inst); + } } + return br; }, .local_val => scope = scope.cast(Scope.LocalVal).?.parent, .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, @@ -538,7 +560,6 @@ fn labeledBlockExpr( .decl = parent_scope.ownerDecl().?, .arena = gen_zir.arena, .instructions = .{}, - .break_result_loc = rl, // TODO @as here is working around a stage1 miscompilation bug :( .label = @as(?Scope.GenZIR.Label, Scope.GenZIR.Label{ .token = block_node.label, @@ -546,19 +567,57 @@ fn labeledBlockExpr( }), }; defer block_scope.instructions.deinit(mod.gpa); + defer block_scope.labeled_breaks.deinit(mod.gpa); + defer 
block_scope.labeled_store_to_block_ptr_list.deinit(mod.gpa); + + setBlockResultLoc(&block_scope, rl); try blockExprStmts(mod, &block_scope.base, &block_node.base, block_node.statements()); + if (!block_scope.label.?.used) { return mod.fail(parent_scope, tree.token_locs[block_node.label].start, "unused block label", .{}); } - block_inst.positionals.body.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items); try gen_zir.instructions.append(mod.gpa, &block_inst.base); - return &block_inst.base; + const strat = rlStrategy(rl, &block_scope); + switch (strat.tag) { + .break_void => { + // The code took advantage of the result location as a pointer. + // Turn the break instructions into break_void instructions. + for (block_scope.labeled_breaks.items) |br| { + br.base.tag = .break_void; + } + // TODO technically not needed since we changed the tag to break_void but + // would be better still to elide the ones that are in this list. + try copyBodyNoEliding(&block_inst.positionals.body, block_scope); + + return &block_inst.base; + }, + .break_operand => { + // All break operands are values that did not use the result location pointer. + if (strat.elide_store_to_block_ptr_instructions) { + for (block_scope.labeled_store_to_block_ptr_list.items) |inst| { + inst.base.tag = .void_value; + } + // TODO technically not needed since we changed the tag to void_value but + // would be better still to elide the ones that are in this list. + } + try copyBodyNoEliding(&block_inst.positionals.body, block_scope); + switch (rl) { + .ref => return &block_inst.base, + else => return rvalue(mod, parent_scope, rl, &block_inst.base), + } + }, + } } -fn blockExprStmts(mod: *Module, parent_scope: *Scope, node: *ast.Node, statements: []*ast.Node) !void { +fn blockExprStmts( + mod: *Module, + parent_scope: *Scope, + node: *ast.Node, + statements: []*ast.Node, +) !void { const tree = parent_scope.tree(); var block_arena = std.heap.ArenaAllocator.init(mod.gpa); @@ -1659,7 +1718,6 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn cond_kind = .{ .err_union = null }; } } - const block_branch_count = 2; // then and else var block_scope: Scope.GenZIR = .{ .parent = scope, .decl = scope.ownerDecl().?, @@ -1668,6 +1726,8 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn }; defer block_scope.instructions.deinit(mod.gpa); + setBlockResultLoc(&block_scope, rl); + const tree = scope.tree(); const if_src = tree.token_locs[if_node.if_token].start; const cond = try cond_kind.cond(mod, &block_scope, if_src, if_node.condition); @@ -1682,33 +1742,6 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn .instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items), }); - // Depending on whether the result location is a pointer or value, different - // ZIR needs to be generated. In the former case we rely on storing to the - // pointer to communicate the result, and use breakvoid; in the latter case - // the block break instructions will have the result values. - // One more complication: when the result location is a pointer, we detect - // the scenario where the result location is not consumed. In this case - // we emit ZIR for the block break instructions to have the result values, - // and then rvalue() on that to pass the value to the result location. 
- const branch_rl: ResultLoc = switch (rl) { - .discard, .none, .ty, .ptr, .ref => rl, - - .inferred_ptr => |ptr| blk: { - block_scope.rl_ptr = &ptr.base; - break :blk .{ .block_ptr = &block_scope }; - }, - - .bitcasted_ptr => |ptr| blk: { - block_scope.rl_ptr = &ptr.base; - break :blk .{ .block_ptr = &block_scope }; - }, - - .block_ptr => |parent_block_scope| blk: { - block_scope.rl_ptr = parent_block_scope.rl_ptr.?; - break :blk .{ .block_ptr = &block_scope }; - }, - }; - const then_src = tree.token_locs[if_node.body.lastToken()].start; var then_scope: Scope.GenZIR = .{ .parent = scope, @@ -1721,7 +1754,8 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn // declare payload to the then_scope const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, then_src, if_node.payload); - const then_result = try expr(mod, then_sub_scope, branch_rl, if_node.body); + block_scope.break_count += 1; + const then_result = try expr(mod, then_sub_scope, block_scope.break_result_loc, if_node.body); // We hold off on the break instructions as well as copying the then/else // instructions into place until we know whether to keep store_to_block_ptr // instructions or not. @@ -1741,47 +1775,18 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn // declare payload to the then_scope else_sub_scope = try cond_kind.elseSubScope(mod, &else_scope, else_src, else_node.payload); - break :blk try expr(mod, else_sub_scope, branch_rl, else_node.body); + block_scope.break_count += 1; + break :blk try expr(mod, else_sub_scope, block_scope.break_result_loc, else_node.body); } else blk: { else_src = tree.token_locs[if_node.lastToken()].start; else_sub_scope = &else_scope.base; - block_scope.rvalue_rl_count += 1; break :blk null; }; // We now have enough information to decide whether the result instruction should // be communicated via result location pointer or break instructions. - const Strategy = enum { - /// Both branches will use break_void; result location is used to communicate the - /// result instruction. - break_void, - /// Use break statements to pass the block result value, and call rvalue() at - /// the end depending on rl. Also elide the store_to_block_ptr instructions - /// depending on rl. - break_operand, - }; - var elide_store_to_block_ptr_instructions = false; - const strategy: Strategy = switch (rl) { - // In this branch there will not be any store_to_block_ptr instructions. - .discard, .none, .ty, .ref => .break_operand, - // The pointer got passed through to the sub-expressions, so we will use - // break_void here. - // In this branch there will not be any store_to_block_ptr instructions. - .ptr => .break_void, - .inferred_ptr, .bitcasted_ptr, .block_ptr => blk: { - if (block_scope.rvalue_rl_count == 2) { - // Neither prong of the if consumed the result location, so we can - // use break instructions to create an rvalue. - elide_store_to_block_ptr_instructions = true; - break :blk Strategy.break_operand; - } else { - // Allow the store_to_block_ptr instructions to remain so that - // semantic analysis can turn them into bitcasts. 
- break :blk Strategy.break_void; - } - }, - }; - switch (strategy) { + const strat = rlStrategy(rl, &block_scope); + switch (strat.tag) { .break_void => { if (!then_result.tag.isNoReturn()) { _ = try addZirInstTag(mod, then_sub_scope, then_src, .break_void, .{ @@ -1799,7 +1804,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn .block = block, }); } - assert(!elide_store_to_block_ptr_instructions); + assert(!strat.elide_store_to_block_ptr_instructions); try copyBodyNoEliding(&condbr.positionals.then_body, then_scope); try copyBodyNoEliding(&condbr.positionals.else_body, else_scope); return &block.base; @@ -1823,7 +1828,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn .block = block, }); } - if (elide_store_to_block_ptr_instructions) { + if (strat.elide_store_to_block_ptr_instructions) { try copyBodyWithElidedStoreBlockPtr(&condbr.positionals.then_body, then_scope); try copyBodyWithElidedStoreBlockPtr(&condbr.positionals.else_body, else_scope); } else { @@ -3376,6 +3381,72 @@ fn rvalueVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, resul return rvalue(mod, scope, rl, void_inst); } +fn rlStrategy(rl: ResultLoc, block_scope: *Scope.GenZIR) ResultLoc.Strategy { + var elide_store_to_block_ptr_instructions = false; + switch (rl) { + // In this branch there will not be any store_to_block_ptr instructions. + .discard, .none, .ty, .ref => return .{ + .tag = .break_operand, + .elide_store_to_block_ptr_instructions = false, + }, + // The pointer got passed through to the sub-expressions, so we will use + // break_void here. + // In this branch there will not be any store_to_block_ptr instructions. + .ptr => return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }, + .inferred_ptr, .bitcasted_ptr, .block_ptr => { + if (block_scope.rvalue_rl_count == block_scope.break_count) { + // Neither prong of the if consumed the result location, so we can + // use break instructions to create an rvalue. + return .{ + .tag = .break_operand, + .elide_store_to_block_ptr_instructions = true, + }; + } else { + // Allow the store_to_block_ptr instructions to remain so that + // semantic analysis can turn them into bitcasts. + return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }; + } + }, + } +} + +fn setBlockResultLoc(block_scope: *Scope.GenZIR, parent_rl: ResultLoc) void { + // Depending on whether the result location is a pointer or value, different + // ZIR needs to be generated. In the former case we rely on storing to the + // pointer to communicate the result, and use break_void; in the latter case + // the block break instructions will have the result values. + // One more complication: when the result location is a pointer, we detect + // the scenario where the result location is not consumed. In this case + // we emit ZIR for the block break instructions to have the result values, + // and then rvalue() on that to pass the value to the result location.
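+ //
+ // A rough sketch of the two strategies (illustrative only; the exact
+ // result location chosen depends on how astgen lowers the surrounding
+ // statement, and `c`, `a`, `b`, and `p` are placeholder names):
+ //
+ //     const x: u32 = if (c) a else b; // value-style rl (.ty): each prong
+ //                                     // ends in a break carrying its
+ //                                     // result value (break_operand).
+ //     p.* = if (c) a else b;          // pointer rl (.ptr): the pointer is
+ //                                     // forwarded, each prong stores
+ //                                     // through it, and the breaks carry
+ //                                     // no operand (break_void).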
+ switch (parent_rl) { + .discard, .none, .ty, .ptr, .ref => { + block_scope.break_result_loc = parent_rl; + }, + + .inferred_ptr => |ptr| { + block_scope.rl_ptr = &ptr.base; + block_scope.break_result_loc = .{ .block_ptr = block_scope }; + }, + + .bitcasted_ptr => |ptr| { + block_scope.rl_ptr = &ptr.base; + block_scope.break_result_loc = .{ .block_ptr = block_scope }; + }, + + .block_ptr => |parent_block_scope| { + block_scope.rl_ptr = parent_block_scope.rl_ptr.?; + block_scope.break_result_loc = .{ .block_ptr = block_scope }; + }, + } +} + pub fn addZirInstTag( mod: *Module, scope: *Scope, diff --git a/src/codegen.zig b/src/codegen.zig index 1f5aad8ab8..362b04ab26 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -846,7 +846,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .br => return self.genBr(inst.castTag(.br).?), .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), .breakpoint => return self.genBreakpoint(inst.src), - .brvoid => return self.genBrVoid(inst.castTag(.brvoid).?), + .br_void => return self.genBrVoid(inst.castTag(.br_void).?), .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), .call => return self.genCall(inst.castTag(.call).?), @@ -2442,10 +2442,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBrBlockFlat(self: *Self, parent_inst: *ir.Inst.BrBlockFlat) !MCValue { - try self.genBody(parent_inst.body); - const last = parent_inst.body.instructions[parent_inst.body.instructions.len - 1]; - return self.br(parent_inst.base.src, parent_inst.block, last); + fn genBrBlockFlat(self: *Self, inst: *ir.Inst.BrBlockFlat) !MCValue { + try self.genBody(inst.body); + const last = inst.body.instructions[inst.body.instructions.len - 1]; + return self.br(inst.base.src, inst.block, last); } fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue { diff --git a/src/ir.zig b/src/ir.zig index 4d421dda4c..408efc3bba 100644 --- a/src/ir.zig +++ b/src/ir.zig @@ -69,7 +69,7 @@ pub const Inst = struct { /// replace one br operand with multiple instructions, without moving anything else around. br_block_flat, breakpoint, - brvoid, + br_void, call, cmp_lt, cmp_lte, @@ -166,7 +166,7 @@ pub const Inst = struct { .block => Block, .br => Br, .br_block_flat => BrBlockFlat, - .brvoid => BrVoid, + .br_void => BrVoid, .call => Call, .condbr => CondBr, .constant => Constant, @@ -259,7 +259,7 @@ pub const Inst = struct { pub fn breakBlock(base: *Inst) ?*Block { return switch (base.tag) { .br => base.castTag(.br).?.block, - .brvoid => base.castTag(.brvoid).?.block, + .br_void => base.castTag(.br_void).?.block, .br_block_flat => base.castTag(.br_block_flat).?.block, else => null, }; @@ -403,7 +403,7 @@ pub const Inst = struct { }; pub const BrVoid = struct { - pub const base_tag = Tag.brvoid; + pub const base_tag = Tag.br_void; base: Inst, block: *Block, diff --git a/src/zir.zig b/src/zir.zig index 301b52efc0..d372cfbf00 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -264,8 +264,7 @@ pub const Inst = struct { /// Write a value to a pointer. For loading, see `deref`. store, /// Same as `store` but the type of the value being stored will be used to infer - /// the block type. The LHS is a block instruction, whose result location is - /// being stored to. + /// the block type. The LHS is the pointer to store to. store_to_block_ptr, /// Same as `store` but the type of the value being stored will be used to infer /// the pointer type. 
@@ -343,6 +342,8 @@ pub const Inst = struct { /// Only checks that `lhs >= rhs` if they are ints, everything else is /// validated by the .switch instruction. switch_range, + /// Does nothing; returns a void value. + void_value, pub fn Type(tag: Tag) type { return switch (tag) { @@ -355,6 +356,7 @@ pub const Inst = struct { .ret_type, .unreachable_unsafe, .unreachable_safe, + .void_value, => NoOp, .alloc, @@ -611,6 +613,7 @@ pub const Inst = struct { .enum_type, .union_type, .struct_type, + .void_value, => false, .@"break", @@ -1640,9 +1643,9 @@ const DumpTzir = struct { try dtz.fetchInstsAndResolveConsts(br_block_flat.body); }, - .brvoid => { - const brvoid = inst.castTag(.brvoid).?; - try dtz.findConst(&brvoid.block.base); + .br_void => { + const br_void = inst.castTag(.br_void).?; + try dtz.findConst(&br_void.block.base); }, .block => { @@ -1803,9 +1806,9 @@ const DumpTzir = struct { try writer.writeAll("})\n"); }, - .brvoid => { - const brvoid = inst.castTag(.brvoid).?; - const kinky = try dtz.writeInst(writer, &brvoid.block.base); + .br_void => { + const br_void = inst.castTag(.br_void).?; + const kinky = try dtz.writeInst(writer, &br_void.block.base); if (kinky) |_| { try writer.writeAll(") // Instruction does not dominate all uses!\n"); } else { diff --git a/src/zir_sema.zig b/src/zir_sema.zig index 0297b9873a..773c782746 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -155,6 +155,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! .switch_range => return zirSwitchRange(mod, scope, old_inst.castTag(.switch_range).?), .bool_and => return zirBoolOp(mod, scope, old_inst.castTag(.bool_and).?), .bool_or => return zirBoolOp(mod, scope, old_inst.castTag(.bool_or).?), + .void_value => return mod.constVoid(scope, old_inst.src), .container_field_named, .container_field_typed, @@ -447,6 +448,8 @@ fn zirStoreToBlockPtr( const ptr = try resolveInst(mod, scope, inst.positionals.lhs); const value = try resolveInst(mod, scope, inst.positionals.rhs); const ptr_ty = try mod.simplePtrType(scope, inst.base.src, value.ty, true, .One); + // TODO detect when this store should be done at compile-time. For example, + // if expressions should force it when the condition is compile-time known. const b = try mod.requireRuntimeBlock(scope, inst.base.src); const bitcasted_ptr = try mod.addUnOp(b, inst.base.src, ptr_ty, .bitcast, ptr); return mod.storePtr(scope, inst.base.src, bitcasted_ptr, value); -- cgit v1.2.3 From 446ebddb937ccc8bea7060b74268e90702656fde Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 17 Jan 2021 11:04:25 +0100 Subject: stage2 ARM: save function arguments to stack for debugging This changes genArg to copy registers to the stack for better debugging. Thus, it requires genSetStack to be implemented in order for genArg to work. --- src/codegen.zig | 91 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 29 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 362b04ab26..904fda0deb 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1567,6 +1567,59 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + fn genArgDbgInfo(self: *Self, inst: *ir.Inst.Arg, mcv: MCValue) !void { + const name_with_null = inst.name[0 .. 
mem.lenZ(inst.name) + 1]; + + switch (mcv) { + .register => |reg| { + // Copy arg to stack for better debugging + const ty = inst.base.ty; + const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch { + return self.fail(inst.base.src, "type '{}' too big to fit into stack frame", .{ty}); + }; + const abi_align = ty.abiAlignment(self.target.*); + const stack_offset = try self.allocMem(&inst.base, abi_size, abi_align); + try self.genSetStack(inst.base.src, ty, stack_offset, MCValue{ .register = reg }); + const adjusted_stack_offset = math.negateCast(stack_offset + abi_size) catch { + return self.fail(inst.base.src, "Stack offset too large for arguments", .{}); + }; + + switch (self.debug_output) { + .dwarf => |dbg_out| { + switch (arch) { + .arm, .armeb => { + try dbg_out.dbg_info.append(link.File.Elf.abbrev_parameter); + + // Get length of the LEB128 stack offset + var counting_writer = std.io.countingWriter(std.io.null_writer); + leb128.writeILEB128(counting_writer.writer(), adjusted_stack_offset) catch unreachable; + + // DW.AT_location, DW.FORM_exprloc + // ULEB128 dwarf expression length + try leb128.writeULEB128(dbg_out.dbg_info.writer(), counting_writer.bytes_written + 1); + try dbg_out.dbg_info.append(DW.OP_breg11); + try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset); + }, + else => { + try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3); + dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter); + dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc + 1, // ULEB128 dwarf expression length + reg.dwarfLocOp(), + }); + }, + } + try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len); + try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4 + dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string + }, + .none => {}, + } + }, + else => {}, + } + } + fn genArg(self: *Self, inst: *ir.Inst.Arg) !MCValue { const arg_index = self.arg_index; self.arg_index += 1; @@ -1574,32 +1627,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (FreeRegInt == u0) { return self.fail(inst.base.src, "TODO implement Register enum for {}", .{self.target.cpu.arch}); } - if (inst.base.isUnused()) - return MCValue.dead; - - try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1); const result = self.args[arg_index]; + try self.genArgDbgInfo(inst, result); + + if (inst.base.isUnused()) + return MCValue.dead; - const name_with_null = inst.name[0 .. 
mem.lenZ(inst.name) + 1]; switch (result) { .register => |reg| { - self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base); + try self.registers.putNoClobber(self.gpa, toCanonicalReg(reg), &inst.base); self.markRegUsed(reg); - - switch (self.debug_output) { - .dwarf => |dbg_out| { - try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 8 + name_with_null.len); - dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter); - dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc - 1, // ULEB128 dwarf expression length - reg.dwarfLocOp(), - }); - try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4 - dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string - }, - .none => {}, - } }, else => {}, } @@ -3705,10 +3743,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var nsaa: u32 = 0; // Next stacked argument address for (param_types) |ty, i| { - if (ty.abiAlignment(self.target.*) == 8) { - // Round up NCRN to the next even number - ncrn += ncrn % 2; - } + if (ty.abiAlignment(self.target.*) == 8) + ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); const param_size = @intCast(u32, ty.abiSize(self.target.*)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { @@ -3722,11 +3758,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(src, "TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(self.target.*) == 8) { - if (nsaa % 8 != 0) { - nsaa += 8 - (nsaa % 8); - } - } + if (ty.abiAlignment(self.target.*) == 8) + nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_offset = nsaa }; nsaa += param_size; -- cgit v1.2.3 From 6a5a6386c60143258fc9970f52e26e3a974b52b5 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 7 Feb 2021 20:01:11 +0100 Subject: stage2 ARM: fix register allocation in genArmBinOp Previously, this would reuse an operand even if reuseOperand returned false for both operands. genArmBinOpCode was also changed to be more Three-address code oriented in the process. 
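As an illustrative sketch (not part of the change below; `lhs_reg`, `rhs_operand`,
and `lhs_operand` are placeholder names, while the `Instruction` helpers are the
ones already used in codegen.zig), three-address form means the destination
register no longer has to alias an operand when emitting `dst = lhs - rhs`:

    // lhs is in a register: dst = lhs - rhs
    Instruction.sub(.al, dst_reg, lhs_reg, rhs_operand)
    // only rhs is in a register: the operands are swapped and rsb
    // (reverse subtract) computes lhs_operand - rhs_reg, still lhs - rhs
    Instruction.rsb(.al, dst_reg, rhs_reg, lhs_operand)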
---
 src/codegen.zig | 79 ++++++++++++++++++++++++++++-----------------------------
 1 file changed, 39 insertions(+), 40 deletions(-)

(limited to 'src/codegen.zig')

diff --git a/src/codegen.zig b/src/codegen.zig
index 904fda0deb..63dbe3268e 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1295,38 +1295,31 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             const rhs = try self.resolveInst(op_rhs);
 
             // Destination must be a register
-            // Source may be register, memory or an immediate
-            //
-            // So there are two options: (lhs is src and rhs is dest)
-            // or (rhs is src and lhs is dest)
-            const lhs_is_dest = blk: {
-                if (self.reuseOperand(inst, 0, lhs)) {
-                    break :blk true;
-                } else if (self.reuseOperand(inst, 1, rhs)) {
-                    break :blk false;
-                } else {
-                    break :blk lhs == .register;
-                }
-            };
-
             var dst_mcv: MCValue = undefined;
-            var src_mcv: MCValue = undefined;
-            var src_inst: *ir.Inst = undefined;
-            if (lhs_is_dest) {
+            var lhs_mcv: MCValue = undefined;
+            var rhs_mcv: MCValue = undefined;
+            if (self.reuseOperand(inst, 0, lhs)) {
                 // LHS is the destination
                 // RHS is the source
-                src_inst = op_rhs;
-                src_mcv = rhs;
-                dst_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs;
-            } else {
+                lhs_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs;
+                rhs_mcv = rhs;
+                dst_mcv = lhs_mcv;
+            } else if (self.reuseOperand(inst, 1, rhs)) {
                 // RHS is the destination
                 // LHS is the source
-                src_inst = op_lhs;
-                src_mcv = lhs;
-                dst_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs;
+                lhs_mcv = lhs;
+                rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs;
+                dst_mcv = rhs_mcv;
+            } else {
+                // TODO save 1 copy instruction by directly allocating the destination register
+                // LHS is the destination
+                // RHS is the source
+                lhs_mcv = try self.copyToNewRegister(inst, lhs);
+                rhs_mcv = rhs;
+                dst_mcv = lhs_mcv;
             }
 
-            try self.genArmBinOpCode(inst.src, dst_mcv.register, src_mcv, lhs_is_dest, op);
+            try self.genArmBinOpCode(inst.src, dst_mcv.register, lhs_mcv, rhs_mcv, op);
             return dst_mcv;
         }
 
@@ -1334,11 +1327,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             self: *Self,
             src: usize,
             dst_reg: Register,
-            src_mcv: MCValue,
-            lhs_is_dest: bool,
+            lhs_mcv: MCValue,
+            rhs_mcv: MCValue,
             op: ir.Inst.Tag,
         ) !void {
-            const operand = switch (src_mcv) {
+            assert(lhs_mcv == .register or rhs_mcv == .register);
+
+            const swap_lhs_and_rhs = rhs_mcv == .register and lhs_mcv != .register;
+            const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
+            const op2 = if (swap_lhs_and_rhs) lhs_mcv else rhs_mcv;
+
+            const operand = switch (op2) {
                 .none => unreachable,
                 .undef => unreachable,
                 .dead, .unreach => unreachable,
@@ -1352,37 +1351,37 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     // Load immediate into register if it doesn't fit
                     // as an operand
                     break :blk Instruction.Operand.fromU32(@intCast(u32, imm)) orelse
-                        Instruction.Operand.reg(try self.copyToTmpRegister(src, src_mcv), Instruction.Operand.Shift.none);
+                        Instruction.Operand.reg(try self.copyToTmpRegister(src, op2), Instruction.Operand.Shift.none);
                 },
-                .register => |src_reg| Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none),
+                .register => |reg| Instruction.Operand.reg(reg, Instruction.Operand.Shift.none),
                 .stack_offset,
                 .embedded_in_code,
                 .memory,
-                => Instruction.Operand.reg(try self.copyToTmpRegister(src, src_mcv), Instruction.Operand.Shift.none),
+                => Instruction.Operand.reg(try self.copyToTmpRegister(src, op2),
Instruction.Operand.Shift.none), }; switch (op) { .add => { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.al, dst_reg, dst_reg, operand).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.al, dst_reg, op1, operand).toU32()); }, .sub => { - if (lhs_is_dest) { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.sub(.al, dst_reg, dst_reg, operand).toU32()); + if (swap_lhs_and_rhs) { + writeInt(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, op1, operand).toU32()); } else { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, dst_reg, operand).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.sub(.al, dst_reg, op1, operand).toU32()); } }, .bool_and, .bit_and => { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, dst_reg, operand).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, op1, operand).toU32()); }, .bool_or, .bit_or => { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, dst_reg, operand).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, op1, operand).toU32()); }, .not, .xor => { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.eor(.al, dst_reg, dst_reg, operand).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.eor(.al, dst_reg, op1, operand).toU32()); }, .cmp_eq => { - writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, dst_reg, operand).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, op1, operand).toU32()); }, else => unreachable, // not a binary instruction } @@ -2135,7 +2134,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const src_mcv = rhs; const dst_mcv = if (lhs != .register) try self.copyToNewRegister(&inst.base, lhs) else lhs; - try self.genArmBinOpCode(inst.base.src, dst_mcv.register, src_mcv, true, .cmp_eq); + try self.genArmBinOpCode(inst.base.src, dst_mcv.register, dst_mcv, src_mcv, .cmp_eq); const info = inst.lhs.ty.intInfo(self.target.*); return switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, -- cgit v1.2.3 From 1480c428065c01c6feff22ce84021c2e0e30aa9b Mon Sep 17 00:00:00 2001 From: Jonathan Marler Date: Sun, 3 Jan 2021 02:20:37 -0700 Subject: require specifier for arrayish types --- lib/std/build.zig | 2 +- lib/std/fmt.zig | 85 ++++++++++++++++++++++++++++---------------- lib/std/testing.zig | 14 ++++---- src/Module.zig | 2 +- src/codegen.zig | 2 +- src/zir_sema.zig | 2 +- test/cli.zig | 8 ++--- test/standalone/cat/main.zig | 2 +- 8 files changed, 71 insertions(+), 46 deletions(-) (limited to 'src/codegen.zig') diff --git a/lib/std/build.zig b/lib/std/build.zig index 0db9d4c24e..77ca854f15 100644 --- a/lib/std/build.zig +++ b/lib/std/build.zig @@ -739,7 +739,7 @@ pub const Builder = struct { return args.default_target; }, else => |e| { - warn("Unable to parse target '{}': {s}\n\n", .{ triple, @errorName(e) }); + warn("Unable to parse target '{s}': {s}\n\n", .{ triple, @errorName(e) }); self.markInvalidUserInput(); return args.default_target; }, diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 27e68ee9c1..c1b24cc6da 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -69,6 +69,7 @@ pub const FormatOptions = struct { /// - `c`: output integer as an ASCII character. Integer type must have 8 bits at max. /// - `u`: output integer as an UTF-8 sequence. Integer type must have 21 bits at max. 
/// - `*`: output the address of the value instead of the value itself. +/// - `any`: output a value of any type using its default format /// /// If a formatted user type contains a function of the type /// ``` @@ -387,17 +388,32 @@ pub fn formatAddress(value: anytype, options: FormatOptions, writer: anytype) @T return; } }, - .Array => |info| { - try writer.writeAll(@typeName(info.child) ++ "@"); - try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer); - return; - }, else => {}, } @compileError("Cannot format non-pointer type " ++ @typeName(T) ++ " with * specifier"); } +// This ANY const is a workaround for: https://github.com/ziglang/zig/issues/7948 +const ANY = "any"; + +fn defaultSpec(comptime T: type) [:0]const u8 { + switch (@typeInfo(T)) { + .Array => |_| return ANY, + .Pointer => |ptr_info| switch (ptr_info.size) { + .One => switch (@typeInfo(ptr_info.child)) { + .Array => |_| return "*", + else => {}, + }, + .Many, .C => return "*", + .Slice => return ANY, + }, + .Optional => |info| return defaultSpec(info.child), + else => {}, + } + return ""; +} + pub fn formatType( value: anytype, comptime fmt: []const u8, @@ -405,18 +421,19 @@ pub fn formatType( writer: anytype, max_depth: usize, ) @TypeOf(writer).Error!void { - if (comptime std.mem.eql(u8, fmt, "*")) { + const actual_fmt = comptime if (std.mem.eql(u8, fmt, ANY)) defaultSpec(@TypeOf(value)) else fmt; + if (comptime std.mem.eql(u8, actual_fmt, "*")) { return formatAddress(value, options, writer); } const T = @TypeOf(value); if (comptime std.meta.trait.hasFn("format")(T)) { - return try value.format(fmt, options, writer); + return try value.format(actual_fmt, options, writer); } switch (@typeInfo(T)) { .ComptimeInt, .Int, .ComptimeFloat, .Float => { - return formatValue(value, fmt, options, writer); + return formatValue(value, actual_fmt, options, writer); }, .Void => { return formatBuf("void", options, writer); @@ -426,16 +443,16 @@ pub fn formatType( }, .Optional => { if (value) |payload| { - return formatType(payload, fmt, options, writer, max_depth); + return formatType(payload, actual_fmt, options, writer, max_depth); } else { return formatBuf("null", options, writer); } }, .ErrorUnion => { if (value) |payload| { - return formatType(payload, fmt, options, writer, max_depth); + return formatType(payload, actual_fmt, options, writer, max_depth); } else |err| { - return formatType(err, fmt, options, writer, max_depth); + return formatType(err, actual_fmt, options, writer, max_depth); } }, .ErrorSet => { @@ -461,7 +478,7 @@ pub fn formatType( } try writer.writeAll("("); - try formatType(@enumToInt(value), fmt, options, writer, max_depth); + try formatType(@enumToInt(value), actual_fmt, options, writer, max_depth); try writer.writeAll(")"); }, .Union => |info| { @@ -475,7 +492,7 @@ pub fn formatType( try writer.writeAll(" = "); inline for (info.fields) |u_field| { if (value == @field(UnionTagType, u_field.name)) { - try formatType(@field(value, u_field.name), fmt, options, writer, max_depth - 1); + try formatType(@field(value, u_field.name), ANY, options, writer, max_depth - 1); } } try writer.writeAll(" }"); @@ -497,48 +514,54 @@ pub fn formatType( } try writer.writeAll(f.name); try writer.writeAll(" = "); - try formatType(@field(value, f.name), fmt, options, writer, max_depth - 1); + try formatType(@field(value, f.name), ANY, options, writer, max_depth - 1); } try writer.writeAll(" }"); }, .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => |info| { + if 
(actual_fmt.len == 0) + @compileError("cannot format array ref without a specifier (i.e. {s} or {*})"); if (info.child == u8) { - if (fmt.len > 0 and comptime mem.indexOfScalar(u8, "sxXeEzZ", fmt[0]) != null) { - return formatText(value, fmt, options, writer); + if (comptime mem.indexOfScalar(u8, "sxXeEzZ", actual_fmt[0]) != null) { + return formatText(value, actual_fmt, options, writer); } } - return format(writer, "{s}@{x}", .{ @typeName(ptr_info.child), @ptrToInt(value) }); + @compileError("Unknown format string: '" ++ actual_fmt ++ "'"); }, .Enum, .Union, .Struct => { - return formatType(value.*, fmt, options, writer, max_depth); + return formatType(value.*, actual_fmt, options, writer, max_depth); }, else => return format(writer, "{s}@{x}", .{ @typeName(ptr_info.child), @ptrToInt(value) }), }, .Many, .C => { + if (actual_fmt.len == 0) + @compileError("cannot format pointer without a specifier (i.e. {s} or {*})"); if (ptr_info.sentinel) |sentinel| { - return formatType(mem.span(value), fmt, options, writer, max_depth); + return formatType(mem.span(value), actual_fmt, options, writer, max_depth); } if (ptr_info.child == u8) { - if (fmt.len > 0 and comptime mem.indexOfScalar(u8, "sxXeEzZ", fmt[0]) != null) { - return formatText(mem.span(value), fmt, options, writer); + if (comptime mem.indexOfScalar(u8, "sxXeEzZ", actual_fmt[0]) != null) { + return formatText(mem.span(value), actual_fmt, options, writer); } } - return format(writer, "{s}@{x}", .{ @typeName(ptr_info.child), @ptrToInt(value) }); + @compileError("Unknown format string: '" ++ actual_fmt ++ "'"); }, .Slice => { + if (actual_fmt.len == 0) + @compileError("cannot format slice without a specifier (i.e. {s} or {any})"); if (max_depth == 0) { return writer.writeAll("{ ... }"); } if (ptr_info.child == u8) { - if (fmt.len > 0 and comptime mem.indexOfScalar(u8, "sxXeEzZ", fmt[0]) != null) { - return formatText(value, fmt, options, writer); + if (comptime mem.indexOfScalar(u8, "sxXeEzZ", actual_fmt[0]) != null) { + return formatText(value, actual_fmt, options, writer); } } try writer.writeAll("{ "); for (value) |elem, i| { - try formatType(elem, fmt, options, writer, max_depth - 1); + try formatType(elem, actual_fmt, options, writer, max_depth - 1); if (i != value.len - 1) { try writer.writeAll(", "); } @@ -547,17 +570,19 @@ pub fn formatType( }, }, .Array => |info| { + if (actual_fmt.len == 0) + @compileError("cannot format array without a specifier (i.e. {s} or {any})"); if (max_depth == 0) { return writer.writeAll("{ ... 
}"); } if (info.child == u8) { - if (fmt.len > 0 and comptime mem.indexOfScalar(u8, "sxXeEzZ", fmt[0]) != null) { - return formatText(&value, fmt, options, writer); + if (comptime mem.indexOfScalar(u8, "sxXeEzZ", actual_fmt[0]) != null) { + return formatText(&value, actual_fmt, options, writer); } } try writer.writeAll("{ "); for (value) |elem, i| { - try formatType(elem, fmt, options, writer, max_depth - 1); + try formatType(elem, actual_fmt, options, writer, max_depth - 1); if (i < value.len - 1) { try writer.writeAll(", "); } @@ -568,7 +593,7 @@ pub fn formatType( try writer.writeAll("{ "); var i: usize = 0; while (i < info.len) : (i += 1) { - try formatValue(value[i], fmt, options, writer); + try formatValue(value[i], actual_fmt, options, writer); if (i < info.len - 1) { try writer.writeAll(", "); } @@ -1668,7 +1693,7 @@ test "slice" { { var int_slice = [_]u32{ 1, 4096, 391891, 1111111111 }; var runtime_zero: usize = 0; - try expectFmt("int: { 1, 4096, 391891, 1111111111 }", "int: {}", .{int_slice[runtime_zero..]}); + try expectFmt("int: { 1, 4096, 391891, 1111111111 }", "int: {any}", .{int_slice[runtime_zero..]}); try expectFmt("int: { 1, 4096, 391891, 1111111111 }", "int: {d}", .{int_slice[runtime_zero..]}); try expectFmt("int: { 1, 1000, 5fad3, 423a35c7 }", "int: {x}", .{int_slice[runtime_zero..]}); try expectFmt("int: { 00001, 01000, 5fad3, 423a35c7 }", "int: {x:0>5}", .{int_slice[runtime_zero..]}); diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 8df05ba7fe..1d89155a58 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -29,7 +29,7 @@ pub var zig_exe_path: []const u8 = undefined; /// and then aborts when actual_error_union is not expected_error. pub fn expectError(expected_error: anyerror, actual_error_union: anytype) void { if (actual_error_union) |actual_payload| { - std.debug.panic("expected error.{s}, found {}", .{ @errorName(expected_error), actual_payload }); + std.debug.panic("expected error.{s}, found {any}", .{ @errorName(expected_error), actual_payload }); } else |actual_error| { if (expected_error != actual_error) { std.debug.panic("expected error.{s}, found error.{s}", .{ @@ -88,7 +88,7 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void { }, .Slice => { if (actual.ptr != expected.ptr) { - std.debug.panic("expected slice ptr {}, found {}", .{ expected.ptr, actual.ptr }); + std.debug.panic("expected slice ptr {*}, found {*}", .{ expected.ptr, actual.ptr }); } if (actual.len != expected.len) { std.debug.panic("expected slice len {}, found {}", .{ expected.len, actual.len }); @@ -145,11 +145,11 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void { if (actual) |actual_payload| { expectEqual(expected_payload, actual_payload); } else { - std.debug.panic("expected {}, found null", .{expected_payload}); + std.debug.panic("expected {any}, found null", .{expected_payload}); } } else { if (actual) |actual_payload| { - std.debug.panic("expected null, found {}", .{actual_payload}); + std.debug.panic("expected null, found {any}", .{actual_payload}); } } }, @@ -159,11 +159,11 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void { if (actual) |actual_payload| { expectEqual(expected_payload, actual_payload); } else |actual_err| { - std.debug.panic("expected {}, found {}", .{ expected_payload, actual_err }); + std.debug.panic("expected {any}, found {}", .{ expected_payload, actual_err }); } } else |expected_err| { if (actual) |actual_payload| { - std.debug.panic("expected {}, found {}", .{ expected_err, 
actual_payload }); + std.debug.panic("expected {}, found {any}", .{ expected_err, actual_payload }); } else |actual_err| { expectEqual(expected_err, actual_err); } @@ -279,7 +279,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const var i: usize = 0; while (i < expected.len) : (i += 1) { if (!std.meta.eql(expected[i], actual[i])) { - std.debug.panic("index {} incorrect. expected {}, found {}", .{ i, expected[i], actual[i] }); + std.debug.panic("index {} incorrect. expected {any}, found {any}", .{ i, expected[i], actual[i] }); } } } diff --git a/src/Module.zig b/src/Module.zig index 8de03b54ab..a90998a386 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2400,7 +2400,7 @@ fn getAnonTypeName(self: *Module, scope: *Scope, base_token: std.zig.ast.TokenIn else => unreachable, }; const loc = tree.tokenLocationLoc(0, tree.token_locs[base_token]); - return std.fmt.allocPrint(self.gpa, "{}:{}:{}", .{ base_name, loc.line, loc.column }); + return std.fmt.allocPrint(self.gpa, "{s}:{}:{}", .{ base_name, loc.line, loc.column }); } fn getNextAnonNameIndex(self: *Module) usize { diff --git a/src/codegen.zig b/src/codegen.zig index 63dbe3268e..9771386403 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2223,7 +2223,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, reg, op).toU32()); break :blk .ne; }, - else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {}", .{ self.target.cpu.arch, @tagName(cond) }), + else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), }; const reloc = Reloc{ diff --git a/src/zir_sema.zig b/src/zir_sema.zig index f373d7174d..a8120108a4 100644 --- a/src/zir_sema.zig +++ b/src/zir_sema.zig @@ -1832,7 +1832,7 @@ fn zirBitwise(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*In const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { - return mod.fail(scope, inst.base.src, "invalid operands to binary bitwise expression: '{}' and '{}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); + return mod.fail(scope, inst.base.src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); } if (casted_lhs.value()) |lhs_val| { diff --git a/test/cli.zig b/test/cli.zig index 33dbc2d62b..8dbef06887 100644 --- a/test/cli.zig +++ b/test/cli.zig @@ -51,9 +51,9 @@ fn unwrapArg(arg: UnwrapArgError![]u8) UnwrapArgError![]u8 { } fn printCmd(cwd: []const u8, argv: []const []const u8) void { - std.debug.warn("cd {} && ", .{cwd}); + std.debug.warn("cd {s} && ", .{cwd}); for (argv) |arg| { - std.debug.warn("{} ", .{arg}); + std.debug.warn("{s} ", .{arg}); } std.debug.warn("\n", .{}); } @@ -75,14 +75,14 @@ fn exec(cwd: []const u8, expect_0: bool, argv: []const []const u8) !ChildProcess if ((code != 0) == expect_0) { std.debug.warn("The following command exited with error code {}:\n", .{code}); printCmd(cwd, argv); - std.debug.warn("stderr:\n{}\n", .{result.stderr}); + std.debug.warn("stderr:\n{s}\n", .{result.stderr}); return error.CommandFailed; } }, else => { std.debug.warn("The following command terminated unexpectedly:\n", .{}); printCmd(cwd, argv); - std.debug.warn("stderr:\n{}\n", .{result.stderr}); + std.debug.warn("stderr:\n{s}\n", .{result.stderr}); return error.CommandFailed; }, } diff --git a/test/standalone/cat/main.zig 
b/test/standalone/cat/main.zig index 89e5fde3cd..80ec97877a 100644 --- a/test/standalone/cat/main.zig +++ b/test/standalone/cat/main.zig @@ -41,6 +41,6 @@ pub fn main() !void { } fn usage(exe: []const u8) !void { - warn("Usage: {} [FILE]...\n", .{exe}); + warn("Usage: {s} [FILE]...\n", .{exe}); return error.Invalid; } -- cgit v1.2.3 From 9270aae071a4ee840193afe1162b24945cbd6d9e Mon Sep 17 00:00:00 2001 From: Tadeo Kondrak Date: Fri, 12 Feb 2021 13:40:44 -0700 Subject: stage2: fix zero-sized function parameters (#7998) --- src/codegen.zig | 25 +++++++++++++++---------- test/stage2/test.zig | 22 ++++++++++++++++++++++ 2 files changed, 37 insertions(+), 10 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 9771386403..d81ad1faf5 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3705,17 +3705,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { for (param_types) |ty, i| { switch (ty.zigTypeTag()) { .Bool, .Int => { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - if (next_int_reg >= c_abi_int_param_regs.len) { - result.args[i] = .{ .stack_offset = next_stack_offset }; - next_stack_offset += param_size; + if (!ty.hasCodeGenBits()) { + assert(cc != .C); + result.args[i] = .{ .none = {} }; } else { - const aliased_reg = registerAlias( - c_abi_int_param_regs[next_int_reg], - param_size, - ); - result.args[i] = .{ .register = aliased_reg }; - next_int_reg += 1; + const param_size = @intCast(u32, ty.abiSize(self.target.*)); + if (next_int_reg >= c_abi_int_param_regs.len) { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_stack_offset += param_size; + } else { + const aliased_reg = registerAlias( + c_abi_int_param_regs[next_int_reg], + param_size, + ); + result.args[i] = .{ .register = aliased_reg }; + next_int_reg += 1; + } } }, else => return self.fail(src, "TODO implement function parameters of type {s}", .{@tagName(ty.zigTypeTag())}), diff --git a/test/stage2/test.zig b/test/stage2/test.zig index 486edeb864..b5de03524f 100644 --- a/test/stage2/test.zig +++ b/test/stage2/test.zig @@ -1393,4 +1393,26 @@ pub fn addCases(ctx: *TestContext) !void { "", ); } + + { + var case = ctx.exe("passing u0 to function", linux_x64); + case.addCompareOutput( + \\export fn _start() noreturn { + \\ doNothing(0); + \\ exit(); + \\} + \\fn doNothing(arg: u0) void {} + \\fn exit() noreturn { + \\ asm volatile ("syscall" + \\ : + \\ : [number] "{rax}" (231), + \\ [arg1] "{rdi}" (0) + \\ : "rcx", "r11", "memory" + \\ ); + \\ unreachable; + \\} + , + "", + ); + } } -- cgit v1.2.3 From 7630a5c566b106b6325a55f29eb1ed9e584d0949 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 Feb 2021 23:47:17 -0700 Subject: stage2: more progress towards Module/astgen building with new mem layout --- lib/std/zig/ast.zig | 2 + src/Module.zig | 362 +++++++++------- src/astgen.zig | 921 ++++++++++++++++++++++------------------ src/codegen.zig | 15 +- src/ir.zig | 1 + src/link/Elf.zig | 32 +- src/link/MachO/DebugSymbols.zig | 32 +- src/zir.zig | 21 +- 8 files changed, 791 insertions(+), 595 deletions(-) (limited to 'src/codegen.zig') diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig index ab81c3415e..40541ea7c1 100644 --- a/lib/std/zig/ast.zig +++ b/lib/std/zig/ast.zig @@ -2834,10 +2834,12 @@ pub const Node = struct { /// `(lhs)`. main_token is the `(`; rhs is the token index of the `)`. grouped_expression, /// `@a(lhs, rhs)`. lhs and rhs may be omitted. + /// main_token is the builtin token. 
builtin_call_two, /// Same as builtin_call_two but there is known to be a trailing comma before the rparen. builtin_call_two_comma, /// `@a(b, c)`. `sub_list[lhs..rhs]`. + /// main_token is the builtin token. builtin_call, /// Same as builtin_call but there is known to be a trailing comma before the rparen. builtin_call_comma, diff --git a/src/Module.zig b/src/Module.zig index a1c2822732..2071ff671c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -428,14 +428,14 @@ pub const Scope = struct { } /// Asserts the scope is a child of a File and has an AST tree and returns the tree. - pub fn tree(self: *Scope) *ast.Tree { + pub fn tree(self: *Scope) *const ast.Tree { switch (self.tag) { - .file => return self.cast(File).?.contents.tree, - .block => return self.cast(Block).?.src_decl.container.file_scope.contents.tree, - .gen_zir => return self.cast(GenZIR).?.decl.container.file_scope.contents.tree, - .local_val => return self.cast(LocalVal).?.gen_zir.decl.container.file_scope.contents.tree, - .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.container.file_scope.contents.tree, - .container => return self.cast(Container).?.file_scope.contents.tree, + .file => return self.cast(File).?.tree, + .block => return self.cast(Block).?.src_decl.container.file_scope.tree, + .gen_zir => return self.cast(GenZIR).?.decl.container.file_scope.tree, + .local_val => return self.cast(LocalVal).?.gen_zir.decl.container.file_scope.tree, + .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.container.file_scope.tree, + .container => return self.cast(Container).?.file_scope.tree, } } @@ -1008,38 +1008,38 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool { switch (node_tags[fn_proto]) { .fn_proto_simple => { var params: [1]ast.Node.Index = undefined; - return mod.astgenAndSemaFn(decl, tree, body, tree.fnProtoSimple(¶ms, fn_proto)); + return mod.astgenAndSemaFn(decl, tree.*, body, tree.fnProtoSimple(¶ms, fn_proto)); }, - .fn_proto_multi => return mod.astgenAndSemaFn(decl, tree, body, tree.fnProtoMulti(fn_proto)), + .fn_proto_multi => return mod.astgenAndSemaFn(decl, tree.*, body, tree.fnProtoMulti(fn_proto)), .fn_proto_one => { var params: [1]ast.Node.Index = undefined; - return mod.astgenAndSemaFn(decl, tree, body, tree.fnProtoOne(¶ms, fn_proto)); + return mod.astgenAndSemaFn(decl, tree.*, body, tree.fnProtoOne(¶ms, fn_proto)); }, - .fn_proto => return mod.astgenAndSemaFn(decl, tree, body, tree.fnProto(fn_proto)), + .fn_proto => return mod.astgenAndSemaFn(decl, tree.*, body, tree.fnProto(fn_proto)), else => unreachable, } }, .fn_proto_simple => { var params: [1]ast.Node.Index = undefined; - return mod.astgenAndSemaFn(decl, tree, null, tree.fnProtoSimple(¶ms, decl_node)); + return mod.astgenAndSemaFn(decl, tree.*, 0, tree.fnProtoSimple(¶ms, decl_node)); }, - .fn_proto_multi => return mod.astgenAndSemaFn(decl, tree, null, tree.fnProtoMulti(decl_node)), + .fn_proto_multi => return mod.astgenAndSemaFn(decl, tree.*, 0, tree.fnProtoMulti(decl_node)), .fn_proto_one => { var params: [1]ast.Node.Index = undefined; - return mod.astgenAndSemaFn(decl, tree, null, tree.fnProtoOne(¶ms, decl_node)); + return mod.astgenAndSemaFn(decl, tree.*, 0, tree.fnProtoOne(¶ms, decl_node)); }, - .fn_proto => return mod.astgenAndSemaFn(decl, tree, null, tree.fnProto(decl_node)), + .fn_proto => return mod.astgenAndSemaFn(decl, tree.*, 0, tree.fnProto(decl_node)), - .global_var_decl => return mod.astgenAndSemaVarDecl(decl, tree, tree.globalVarDecl(decl_node)), - .local_var_decl => return mod.astgenAndSemaVarDecl(decl, tree, 
tree.localVarDecl(decl_node)), - .simple_var_decl => return mod.astgenAndSemaVarDecl(decl, tree, tree.simpleVarDecl(decl_node)), - .aligned_var_decl => return mod.astgenAndSemaVarDecl(decl, tree, tree.alignedVarDecl(decl_node)), + .global_var_decl => return mod.astgenAndSemaVarDecl(decl, tree.*, tree.globalVarDecl(decl_node)), + .local_var_decl => return mod.astgenAndSemaVarDecl(decl, tree.*, tree.localVarDecl(decl_node)), + .simple_var_decl => return mod.astgenAndSemaVarDecl(decl, tree.*, tree.simpleVarDecl(decl_node)), + .aligned_var_decl => return mod.astgenAndSemaVarDecl(decl, tree.*, tree.alignedVarDecl(decl_node)), .@"comptime" => { decl.analysis = .in_progress; // A comptime decl does not store any value so we can just deinit this arena after analysis is done. - var analysis_arena = std.heap.ArenaAllocator.init(self.gpa); + var analysis_arena = std.heap.ArenaAllocator.init(mod.gpa); defer analysis_arena.deinit(); var gen_scope: Scope.GenZIR = .{ .decl = decl, @@ -1047,14 +1047,15 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool { .parent = &decl.container.base, .force_comptime = true, }; - defer gen_scope.instructions.deinit(self.gpa); + defer gen_scope.instructions.deinit(mod.gpa); - _ = try astgen.comptimeExpr(self, &gen_scope.base, .none, comptime_decl.expr); - if (std.builtin.mode == .Debug and self.comp.verbose_ir) { - zir.dumpZir(self.gpa, "comptime_block", decl.name, gen_scope.instructions.items) catch {}; + const block_expr = node_datas[decl_node].lhs; + _ = try astgen.comptimeExpr(mod, &gen_scope.base, .none, block_expr); + if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { + zir.dumpZir(mod.gpa, "comptime_block", decl.name, gen_scope.instructions.items) catch {}; } - var inst_table = Scope.Block.InstTable.init(self.gpa); + var inst_table = Scope.Block.InstTable.init(mod.gpa); defer inst_table.deinit(); var branch_quota: u32 = default_eval_branch_quota; @@ -1071,17 +1072,17 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool { .is_comptime = true, .branch_quota = &branch_quota, }; - defer block_scope.instructions.deinit(self.gpa); + defer block_scope.instructions.deinit(mod.gpa); - _ = try zir_sema.analyzeBody(self, &block_scope, .{ + _ = try zir_sema.analyzeBody(mod, &block_scope, .{ .instructions = gen_scope.instructions.items, }); decl.analysis = .complete; - decl.generation = self.generation; + decl.generation = mod.generation; return true; }, - .UsingNamespace => @panic("TODO usingnamespace decl"), + .@"usingnamespace" => @panic("TODO usingnamespace decl"), else => unreachable, } } @@ -1099,18 +1100,20 @@ fn astgenAndSemaFn( decl.analysis = .in_progress; const token_starts = tree.tokens.items(.start); + const token_tags = tree.tokens.items(.tag); // This arena allocator's memory is discarded at the end of this function. It is used // to determine the type of the function, and hence the type of the decl, which is needed // to complete the Decl analysis. 
- var fn_type_scope_arena = std.heap.ArenaAllocator.init(self.gpa); + var fn_type_scope_arena = std.heap.ArenaAllocator.init(mod.gpa); defer fn_type_scope_arena.deinit(); var fn_type_scope: Scope.GenZIR = .{ .decl = decl, .arena = &fn_type_scope_arena.allocator, .parent = &decl.container.base, + .force_comptime = true, }; - defer fn_type_scope.instructions.deinit(self.gpa); + defer fn_type_scope.instructions.deinit(mod.gpa); decl.is_pub = fn_proto.visib_token != null; @@ -1126,7 +1129,7 @@ fn astgenAndSemaFn( }; const param_types = try fn_type_scope.arena.alloc(*zir.Inst, param_count); const fn_src = token_starts[fn_proto.ast.fn_token]; - const type_type = try astgen.addZIRInstConst(self, &fn_type_scope.base, fn_src, .{ + const type_type = try astgen.addZIRInstConst(mod, &fn_type_scope.base, fn_src, .{ .ty = Type.initTag(.type), .val = Value.initTag(.type_type), }); @@ -1138,13 +1141,13 @@ fn astgenAndSemaFn( while (it.next()) |param| : (param_type_i += 1) { if (param.anytype_ellipsis3) |token| { switch (token_tags[token]) { - .keyword_anytype => return self.failTok( + .keyword_anytype => return mod.failTok( &fn_type_scope.base, - tok_i, + token, "TODO implement anytype parameter", .{}, ), - .ellipsis3 => return self.failTok( + .ellipsis3 => return mod.failTok( &fn_type_scope.base, token, "TODO implement var args", @@ -1156,7 +1159,7 @@ fn astgenAndSemaFn( const param_type_node = param.type_expr; assert(param_type_node != 0); param_types[param_type_i] = - try astgen.expr(self, &fn_type_scope.base, type_type_rl, param_type_node); + try astgen.expr(mod, &fn_type_scope.base, type_type_rl, param_type_node); } assert(param_type_i == param_count); } @@ -1164,10 +1167,10 @@ fn astgenAndSemaFn( // TODO call std.zig.parseStringLiteral const lib_name_str = mem.trim(u8, tree.tokenSlice(lib_name), "\""); log.debug("extern fn symbol expected in lib '{s}'", .{lib_name_str}); - const target = self.comp.getTarget(); + const target = mod.comp.getTarget(); if (target_util.is_libc_lib_name(target, lib_name_str)) { - if (!self.comp.bin_file.options.link_libc) { - return self.failTok( + if (!mod.comp.bin_file.options.link_libc) { + return mod.failTok( &fn_type_scope.base, lib_name, "dependency on libc must be explicitly specified in the build command", @@ -1177,8 +1180,8 @@ fn astgenAndSemaFn( break :blk; } if (target_util.is_libcpp_lib_name(target, lib_name_str)) { - if (!self.comp.bin_file.options.link_libcpp) { - return self.failTok( + if (!mod.comp.bin_file.options.link_libcpp) { + return mod.failTok( &fn_type_scope.base, lib_name, "dependency on libc++ must be explicitly specified in the build command", @@ -1187,16 +1190,16 @@ fn astgenAndSemaFn( } break :blk; } - if (!target.isWasm() and !self.comp.bin_file.options.pic) { - return self.failTok( + if (!target.isWasm() and !mod.comp.bin_file.options.pic) { + return mod.failTok( &fn_type_scope.base, lib_name, "dependency on dynamic library '{s}' requires enabling Position Independent Code. 
Fixed by `-l{s}` or `-fPIC`.", .{ lib_name, lib_name }, ); } - self.comp.stage1AddLinkLib(lib_name_str) catch |err| { - return self.failTok( + mod.comp.stage1AddLinkLib(lib_name_str) catch |err| { + return mod.failTok( &fn_type_scope.base, lib_name, "unable to add link lib '{s}': {s}", @@ -1204,45 +1207,55 @@ fn astgenAndSemaFn( ); }; } - if (fn_proto.ast.align_expr) |align_expr| { - return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{}); + if (fn_proto.ast.align_expr != 0) { + return mod.failNode( + &fn_type_scope.base, + fn_proto.ast.align_expr, + "TODO implement function align expression", + .{}, + ); } - if (fn_proto.ast.section_expr) |sect_expr| { - return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{}); + if (fn_proto.ast.section_expr != 0) { + return mod.failNode( + &fn_type_scope.base, + fn_proto.ast.section_expr, + "TODO implement function section expression", + .{}, + ); } - if (fn_proto.ast.callconv_expr) |callconv_expr| { - return self.failNode( + if (fn_proto.ast.callconv_expr != 0) { + return mod.failNode( &fn_type_scope.base, - callconv_expr, + fn_proto.ast.callconv_expr, "TODO implement function calling convention expression", .{}, ); } const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1; if (token_tags[maybe_bang] == .bang) { - return self.failTok(&fn_type_scope.base, maybe_bang, "TODO implement inferred error sets", .{}); + return mod.failTok(&fn_type_scope.base, maybe_bang, "TODO implement inferred error sets", .{}); } const return_type_inst = try astgen.expr( - self, + mod, &fn_type_scope.base, type_type_rl, fn_proto.ast.return_type, ); - const fn_type_inst = try astgen.addZIRInst(self, &fn_type_scope.base, fn_src, zir.Inst.FnType, .{ + const fn_type_inst = try astgen.addZIRInst(mod, &fn_type_scope.base, fn_src, zir.Inst.FnType, .{ .return_type = return_type_inst, .param_types = param_types, }, .{}); - if (std.builtin.mode == .Debug and self.comp.verbose_ir) { - zir.dumpZir(self.gpa, "fn_type", decl.name, fn_type_scope.instructions.items) catch {}; + if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { + zir.dumpZir(mod.gpa, "fn_type", decl.name, fn_type_scope.instructions.items) catch {}; } // We need the memory for the Type to go into the arena for the Decl - var decl_arena = std.heap.ArenaAllocator.init(self.gpa); + var decl_arena = std.heap.ArenaAllocator.init(mod.gpa); errdefer decl_arena.deinit(); const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); - var inst_table = Scope.Block.InstTable.init(self.gpa); + var inst_table = Scope.Block.InstTable.init(mod.gpa); defer inst_table.deinit(); var branch_quota: u32 = default_eval_branch_quota; @@ -1259,9 +1272,9 @@ fn astgenAndSemaFn( .is_comptime = false, .branch_quota = &branch_quota, }; - defer block_scope.instructions.deinit(self.gpa); + defer block_scope.instructions.deinit(mod.gpa); - const fn_type = try zir_sema.analyzeBodyValueAsType(self, &block_scope, fn_type_inst, .{ + const fn_type = try zir_sema.analyzeBodyValueAsType(mod, &block_scope, fn_type_inst, .{ .instructions = fn_type_scope.instructions.items, }); if (body_node == 0) { @@ -1270,7 +1283,7 @@ fn astgenAndSemaFn( if (decl.typedValueManaged()) |tvm| { type_changed = !tvm.typed_value.ty.eql(fn_type); - tvm.deinit(self.gpa); + tvm.deinit(mod.gpa); } const fn_val = try Value.Tag.extern_fn.create(&decl_arena.allocator, decl); @@ -1282,13 +1295,13 @@ fn astgenAndSemaFn( }, }; decl.analysis = .complete; - 
decl.generation = self.generation; + decl.generation = mod.generation; - try self.comp.bin_file.allocateDeclIndexes(decl); - try self.comp.work_queue.writeItem(.{ .codegen_decl = decl }); + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (type_changed and self.emit_h != null) { - try self.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); } return type_changed; @@ -1304,17 +1317,17 @@ fn astgenAndSemaFn( .arena = &decl_arena.allocator, .parent = &decl.container.base, }; - defer gen_scope.instructions.deinit(self.gpa); + defer gen_scope.instructions.deinit(mod.gpa); // We need an instruction for each parameter, and they must be first in the body. - try gen_scope.instructions.resize(self.gpa, param_count); + try gen_scope.instructions.resize(mod.gpa, param_count); var params_scope = &gen_scope.base; var i: usize = 0; var it = fn_proto.iterate(tree); while (it.next()) |param| : (i += 1) { const name_token = param.name_token.?; const src = token_starts[name_token]; - const param_name = try self.identifierTokenString(&gen_scope.base, name_token); + const param_name = try mod.identifierTokenString(&gen_scope.base, name_token); const arg = try decl_arena.allocator.create(zir.Inst.NoOp); arg.* = .{ .base = .{ @@ -1335,17 +1348,17 @@ fn astgenAndSemaFn( params_scope = &sub_scope.base; } - try astgen.blockExpr(self, params_scope, body_node); + try astgen.blockExpr(mod, params_scope, body_node); if (gen_scope.instructions.items.len == 0 or !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn()) { const src = token_starts[tree.lastToken(body_node)]; - _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid); + _ = try astgen.addZIRNoOp(mod, &gen_scope.base, src, .returnvoid); } - if (std.builtin.mode == .Debug and self.comp.verbose_ir) { - zir.dumpZir(self.gpa, "fn_body", decl.name, gen_scope.instructions.items) catch {}; + if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { + zir.dumpZir(mod.gpa, "fn_body", decl.name, gen_scope.instructions.items) catch {}; } break :blk .{ @@ -1379,7 +1392,7 @@ fn astgenAndSemaFn( prev_is_inline = prev_func.state == .inline_only; } - tvm.deinit(self.gpa); + tvm.deinit(mod.gpa); } decl_arena_state.* = decl_arena.state; @@ -1393,25 +1406,25 @@ fn astgenAndSemaFn( }, }; decl.analysis = .complete; - decl.generation = self.generation; + decl.generation = mod.generation; if (!is_inline and fn_type.hasCodeGenBits()) { // We don't fully codegen the decl until later, but we do need to reserve a global // offset table index for it. This allows us to codegen decls out of dependency order, // increasing how many computations can be done in parallel. 
- try self.comp.bin_file.allocateDeclIndexes(decl); - try self.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (type_changed and self.emit_h != null) { - try self.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); } } else if (!prev_is_inline and prev_type_has_bits) { - self.comp.bin_file.freeDecl(decl); + mod.comp.bin_file.freeDecl(decl); } if (fn_proto.extern_export_token) |maybe_export_token| { - if (token_tags[maybe_export_token] == .Keyword_export) { + if (token_tags[maybe_export_token] == .keyword_export) { if (is_inline) { - return self.failTok( + return mod.failTok( &block_scope.base, maybe_export_token, "export of inline function", @@ -1421,7 +1434,7 @@ fn astgenAndSemaFn( const export_src = token_starts[maybe_export_token]; const name = tree.tokenSlice(fn_proto.name_token.?); // TODO identifierTokenString // The scope needs to have the decl in it. - try self.analyzeExport(&block_scope.base, export_src, name, decl); + try mod.analyzeExport(&block_scope.base, export_src, name, decl); } } return type_changed or is_inline != prev_is_inline; @@ -1439,13 +1452,14 @@ fn astgenAndSemaVarDecl( decl.analysis = .in_progress; const token_starts = tree.tokens.items(.start); + const token_tags = tree.tokens.items(.tag); // We need the memory for the Type to go into the arena for the Decl - var decl_arena = std.heap.ArenaAllocator.init(self.gpa); + var decl_arena = std.heap.ArenaAllocator.init(mod.gpa); errdefer decl_arena.deinit(); const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); - var decl_inst_table = Scope.Block.InstTable.init(self.gpa); + var decl_inst_table = Scope.Block.InstTable.init(mod.gpa); defer decl_inst_table.deinit(); var branch_quota: u32 = default_eval_branch_quota; @@ -1462,63 +1476,83 @@ fn astgenAndSemaVarDecl( .is_comptime = true, .branch_quota = &branch_quota, }; - defer block_scope.instructions.deinit(self.gpa); + defer block_scope.instructions.deinit(mod.gpa); - decl.is_pub = var_decl.getVisibToken() != null; + decl.is_pub = var_decl.visib_token != null; const is_extern = blk: { - const maybe_extern_token = var_decl.getExternExportToken() orelse - break :blk false; - if (tree.token_ids[maybe_extern_token] != .Keyword_extern) break :blk false; - if (var_decl.getInitNode()) |some| { - return self.failNode(&block_scope.base, some, "extern variables have no initializers", .{}); + const maybe_extern_token = var_decl.extern_export_token orelse break :blk false; + if (token_tags[maybe_extern_token] != .keyword_extern) break :blk false; + if (var_decl.ast.init_node != 0) { + return mod.failNode( + &block_scope.base, + var_decl.ast.init_node, + "extern variables have no initializers", + .{}, + ); } break :blk true; }; - if (var_decl.getLibName()) |lib_name| { + if (var_decl.lib_name) |lib_name| { assert(is_extern); - return self.failNode(&block_scope.base, lib_name, "TODO implement function library name", .{}); + return mod.failTok(&block_scope.base, lib_name, "TODO implement function library name", .{}); } - const is_mutable = tree.token_ids[var_decl.mut_token] == .Keyword_var; - const is_threadlocal = if (var_decl.getThreadLocalToken()) |some| blk: { + const is_mutable = token_tags[var_decl.mut_token] == .keyword_var; + const is_threadlocal = if (var_decl.threadlocal_token) |some| blk: { if (!is_mutable) { 
- return self.failTok(&block_scope.base, some, "threadlocal variable cannot be constant", .{}); + return mod.failTok(&block_scope.base, some, "threadlocal variable cannot be constant", .{}); } break :blk true; } else false; - assert(var_decl.getComptimeToken() == null); - if (var_decl.getAlignNode()) |align_expr| { - return self.failNode(&block_scope.base, align_expr, "TODO implement function align expression", .{}); + assert(var_decl.comptime_token == null); + if (var_decl.ast.align_node != 0) { + return mod.failNode( + &block_scope.base, + var_decl.ast.align_node, + "TODO implement function align expression", + .{}, + ); } - if (var_decl.getSectionNode()) |sect_expr| { - return self.failNode(&block_scope.base, sect_expr, "TODO implement function section expression", .{}); + if (var_decl.ast.section_node != 0) { + return mod.failNode( + &block_scope.base, + var_decl.ast.section_node, + "TODO implement function section expression", + .{}, + ); } - const var_info: struct { ty: Type, val: ?Value } = if (var_decl.getInitNode()) |init_node| vi: { - var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa); + const var_info: struct { ty: Type, val: ?Value } = if (var_decl.ast.init_node != 0) vi: { + var gen_scope_arena = std.heap.ArenaAllocator.init(mod.gpa); defer gen_scope_arena.deinit(); var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &gen_scope_arena.allocator, .parent = &decl.container.base, }; - defer gen_scope.instructions.deinit(self.gpa); + defer gen_scope.instructions.deinit(mod.gpa); - const init_result_loc: astgen.ResultLoc = if (var_decl.getTypeNode()) |type_node| rl: { - const src = token_starts[type_node.firstToken()]; - const type_type = try astgen.addZIRInstConst(self, &gen_scope.base, src, .{ + const init_result_loc: astgen.ResultLoc = if (var_decl.ast.type_node != 0) rl: { + const type_node = var_decl.ast.type_node; + const src = token_starts[tree.firstToken(type_node)]; + const type_type = try astgen.addZIRInstConst(mod, &gen_scope.base, src, .{ .ty = Type.initTag(.type), .val = Value.initTag(.type_type), }); - const var_type = try astgen.expr(self, &gen_scope.base, .{ .ty = type_type }, type_node); + const var_type = try astgen.expr(mod, &gen_scope.base, .{ .ty = type_type }, type_node); break :rl .{ .ty = var_type }; } else .none; - const init_inst = try astgen.comptimeExpr(self, &gen_scope.base, init_result_loc, init_node); - if (std.builtin.mode == .Debug and self.comp.verbose_ir) { - zir.dumpZir(self.gpa, "var_init", decl.name, gen_scope.instructions.items) catch {}; + const init_inst = try astgen.comptimeExpr( + mod, + &gen_scope.base, + init_result_loc, + var_decl.ast.init_node, + ); + if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { + zir.dumpZir(mod.gpa, "var_init", decl.name, gen_scope.instructions.items) catch {}; } - var var_inst_table = Scope.Block.InstTable.init(self.gpa); + var var_inst_table = Scope.Block.InstTable.init(mod.gpa); defer var_inst_table.deinit(); var branch_quota_vi: u32 = default_eval_branch_quota; @@ -1534,8 +1568,8 @@ fn astgenAndSemaVarDecl( .is_comptime = true, .branch_quota = &branch_quota_vi, }; - defer inner_block.instructions.deinit(self.gpa); - try zir_sema.analyzeBody(self, &inner_block, .{ + defer inner_block.instructions.deinit(mod.gpa); + try zir_sema.analyzeBody(mod, &inner_block, .{ .instructions = gen_scope.instructions.items, }); @@ -1550,24 +1584,30 @@ fn astgenAndSemaVarDecl( .val = try val.copy(block_scope.arena), }; } else if (!is_extern) { - return self.failTok(&block_scope.base, var_decl.firstToken(), 
"variables must be initialized", .{}); - } else if (var_decl.getTypeNode()) |type_node| vi: { + return mod.failTok( + &block_scope.base, + tree.firstToken(var_decl), + "variables must be initialized", + .{}, + ); + } else if (var_decl.ast.type_node != 0) vi: { + const type_node = var_decl.ast.type_node; // Temporary arena for the zir instructions. - var type_scope_arena = std.heap.ArenaAllocator.init(self.gpa); + var type_scope_arena = std.heap.ArenaAllocator.init(mod.gpa); defer type_scope_arena.deinit(); var type_scope: Scope.GenZIR = .{ .decl = decl, .arena = &type_scope_arena.allocator, .parent = &decl.container.base, }; - defer type_scope.instructions.deinit(self.gpa); + defer type_scope.instructions.deinit(mod.gpa); - const var_type = try astgen.typeExpr(self, &type_scope.base, type_node); - if (std.builtin.mode == .Debug and self.comp.verbose_ir) { - zir.dumpZir(self.gpa, "var_type", decl.name, type_scope.instructions.items) catch {}; + const var_type = try astgen.typeExpr(mod, &type_scope.base, type_node); + if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { + zir.dumpZir(mod.gpa, "var_type", decl.name, type_scope.instructions.items) catch {}; } - const ty = try zir_sema.analyzeBodyValueAsType(self, &block_scope, var_type, .{ + const ty = try zir_sema.analyzeBodyValueAsType(mod, &block_scope, var_type, .{ .instructions = type_scope.instructions.items, }); break :vi .{ @@ -1575,18 +1615,28 @@ fn astgenAndSemaVarDecl( .val = null, }; } else { - return self.failTok(&block_scope.base, var_decl.firstToken(), "unable to infer variable type", .{}); + return mod.failTok( + &block_scope.base, + tree.firstToken(var_decl), + "unable to infer variable type", + .{}, + ); }; if (is_mutable and !var_info.ty.isValidVarType(is_extern)) { - return self.failTok(&block_scope.base, var_decl.firstToken(), "variable of type '{}' must be const", .{var_info.ty}); + return mod.failTok( + &block_scope.base, + tree.firstToken(var_decl), + "variable of type '{}' must be const", + .{var_info.ty}, + ); } var type_changed = true; if (decl.typedValueManaged()) |tvm| { type_changed = !tvm.typed_value.ty.eql(var_info.ty); - tvm.deinit(self.gpa); + tvm.deinit(mod.gpa); } const new_variable = try decl_arena.allocator.create(Var); @@ -1610,14 +1660,15 @@ fn astgenAndSemaVarDecl( }, }; decl.analysis = .complete; - decl.generation = self.generation; + decl.generation = mod.generation; - if (var_decl.getExternExportToken()) |maybe_export_token| { - if (tree.token_ids[maybe_export_token] == .Keyword_export) { + if (var_decl.extern_export_token) |maybe_export_token| { + if (token_tags[maybe_export_token] == .keyword_export) { const export_src = token_starts[maybe_export_token]; - const name = tree.tokenSlice(var_decl.name_token); // TODO identifierTokenString + const name_token = var_decl.ast.mut_token + 1; + const name = tree.tokenSlice(name_token); // TODO identifierTokenString // The scope needs to have the decl in it. 
- try self.analyzeExport(&block_scope.base, export_src, name, decl); + try mod.analyzeExport(&block_scope.base, export_src, name, decl); } } return type_changed; @@ -1761,7 +1812,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - null, + 0, tree.fnProtoSimple(¶ms, decl_node), ); }, @@ -1771,7 +1822,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - null, + 0, tree.fnProtoMulti(decl_node), ), .fn_proto_one => { @@ -1782,7 +1833,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - null, + 0, tree.fnProtoOne(¶ms, decl_node), ); }, @@ -1792,7 +1843,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - null, + 0, tree.fnProto(decl_node), ), @@ -1848,7 +1899,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - tree.containerFieldInit(decl), + tree.containerFieldInit(decl_node), ), .container_field_align => try mod.semaContainerField( container_scope, @@ -1856,7 +1907,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - tree.containerFieldAlign(decl), + tree.containerFieldAlign(decl_node), ), .container_field => try mod.semaContainerField( container_scope, @@ -1864,7 +1915,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void { decl_node, decl_i, tree.*, - tree.containerField(decl), + tree.containerField(decl_node), ), .test_decl => { @@ -1936,14 +1987,14 @@ fn semaContainerFn( // in `Decl` to notice that the line number did not change. mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); }, - .c, .wasm => {}, + .c, .wasm, .spirv => {}, } } } else { const new_decl = try mod.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); container_scope.decls.putAssumeCapacity(new_decl, {}); - if (fn_proto.getExternExportInlineToken()) |maybe_export_token| { - if (tree.token_ids[maybe_export_token] == .Keyword_export) { + if (fn_proto.extern_export_token) |maybe_export_token| { + if (token_tags[maybe_export_token] == .keyword_export) { mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } } @@ -1963,9 +2014,11 @@ fn semaContainerVar( defer tracy.end(); const token_starts = tree.tokens.items(.start); + const token_tags = tree.tokens.items(.tag); - const name_src = token_starts[var_decl.name_token]; - const name = tree.tokenSlice(var_decl.name_token); // TODO identifierTokenString + const name_token = var_decl.ast.mut_token + 1; + const name_src = token_starts[name_token]; + const name = tree.tokenSlice(name_token); // TODO identifierTokenString const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node)); if (mod.decl_table.get(name_hash)) |decl| { @@ -1987,15 +2040,23 @@ fn semaContainerVar( } else { const new_decl = try mod.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); container_scope.decls.putAssumeCapacity(new_decl, {}); - if (var_decl.getExternExportToken()) |maybe_export_token| { - if (tree.token_ids[maybe_export_token] == .Keyword_export) { + if (var_decl.extern_export_token) |maybe_export_token| { + if (token_tags[maybe_export_token] == .keyword_export) { mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); 
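// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: the new AST no longer carries a
// `name_token` field on variable declarations, so the hunks above assume the
// identifier token sits directly after `mut_token` (the `const`/`var`
// keyword). A minimal Zig illustration of that lookup; `varDeclNameToken` is
// a hypothetical helper name, the diff inlines the computation instead.
const std = @import("std");
const ast = std.zig.ast;

fn varDeclNameToken(var_decl: ast.full.VarDecl) ast.TokenIndex {
    // The name is assumed to be the token immediately after `const`/`var`.
    return var_decl.ast.mut_token + 1;
}
// Usage, mirroring `semaContainerVar` above:
//   const name = tree.tokenSlice(varDeclNameToken(var_decl)); // TODO identifierTokenString
// ---------------------------------------------------------------------------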
} } } } -fn semaContainerField() void { +fn semaContainerField( + mod: *Module, + container_scope: *Scope.Container, + deleted_decls: *std.AutoArrayHashMap(*Decl, void), + decl_node: ast.Node.Index, + decl_i: usize, + tree: ast.Tree, + field: ast.full.ContainerField, +) !void { const tracy = trace(@src()); defer tracy.end(); @@ -2898,7 +2959,7 @@ pub fn analyzeImport(self: *Module, scope: *Scope, src: usize, target_string: [] file_scope.* = .{ .sub_file_path = resolved_path, .source = .{ .unloaded = {} }, - .contents = .{ .not_available = {} }, + .tree = undefined, .status = .never_loaded, .pkg = found_pkg orelse cur_pkg, .root_container = .{ @@ -3415,11 +3476,12 @@ pub fn failTok( pub fn failNode( self: *Module, scope: *Scope, - ast_node: *ast.Node, + ast_node: ast.Node.Index, comptime format: []const u8, args: anytype, ) InnerError { - const src = scope.tree().tokens.items(.start)[ast_node.firstToken()]; + const tree = scope.tree(); + const src = tree.tokens.items(.start)[tree.firstToken(ast_node)]; return self.fail(scope, src, format, args); } diff --git a/src/astgen.zig b/src/astgen.zig index ece16d70da..dcc2ea9ad2 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -55,7 +55,7 @@ pub const ResultLoc = union(enum) { }; }; -pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*zir.Inst { +pub fn typeExpr(mod: *Module, scope: *Scope, type_node: ast.Node.Index) InnerError!*zir.Inst { const type_src = scope.tree().token_locs[type_node.firstToken()].start; const type_type = try addZIRInstConst(mod, scope, type_src, .{ .ty = Type.initTag(.type), @@ -65,134 +65,133 @@ pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*z return expr(mod, scope, type_rl, type_node); } -fn lvalExpr(mod: *Module, scope: *Scope, node: *ast.Node) InnerError!*zir.Inst { - switch (node.tag) { - .Root => unreachable, - .Use => unreachable, - .TestDecl => unreachable, - .DocComment => unreachable, - .VarDecl => unreachable, - .SwitchCase => unreachable, - .SwitchElse => unreachable, - .Else => unreachable, - .Payload => unreachable, - .PointerPayload => unreachable, - .PointerIndexPayload => unreachable, - .ErrorTag => unreachable, - .FieldInitializer => unreachable, - .ContainerField => unreachable, - - .Assign, - .AssignBitAnd, - .AssignBitOr, - .AssignBitShiftLeft, - .AssignBitShiftRight, - .AssignBitXor, - .AssignDiv, - .AssignSub, - .AssignSubWrap, - .AssignMod, - .AssignAdd, - .AssignAddWrap, - .AssignMul, - .AssignMulWrap, - .Add, - .AddWrap, - .Sub, - .SubWrap, - .Mul, - .MulWrap, - .Div, - .Mod, - .BitAnd, - .BitOr, - .BitShiftLeft, - .BitShiftRight, - .BitXor, - .BangEqual, - .EqualEqual, - .GreaterThan, - .GreaterOrEqual, - .LessThan, - .LessOrEqual, - .ArrayCat, - .ArrayMult, - .BoolAnd, - .BoolOr, - .Asm, - .StringLiteral, - .IntegerLiteral, - .Call, - .Unreachable, - .Return, - .If, - .While, - .BoolNot, - .AddressOf, - .FloatLiteral, - .UndefinedLiteral, - .BoolLiteral, - .NullLiteral, - .OptionalType, - .Block, - .LabeledBlock, - .Break, +fn lvalExpr(mod: *Module, scope: *Scope, node: ast.Node.Index) InnerError!*zir.Inst { + const tree = scope.tree(); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + switch (node_tags[node]) { + .root => unreachable, + .@"usingnamespace" => unreachable, + .test_decl => unreachable, + .doc_comment => unreachable, + .var_decl => unreachable, + .switch_case => unreachable, + .switch_else => unreachable, + .container_field_init => unreachable, + .container_field_align => 
unreachable, + .container_field => unreachable, + + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_bit_shift_left, + .assign_bit_shift_right, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_mul, + .assign_mul_wrap, + .add, + .add_wrap, + .sub, + .sub_wrap, + .mul, + .mul_wrap, + .div, + .mod, + .bit_and, + .bit_or, + .bit_shift_left, + .bit_shift_right, + .bit_xor, + .bang_equal, + .equal_equal, + .greater_than, + .greater_or_equal, + .less_than, + .less_or_equal, + .array_cat, + .array_mult, + .bool_and, + .bool_or, + .@"asm", + .string_literal, + .integer_literal, + .call, + .@"unreachable", + .@"return", + .@"if", + .@"while", + .bool_not, + .address_of, + .float_literal, + .undefined_literal, + .bool_literal, + .null_literal, + .optional_type, + .block, + .labeled_block, + .@"break", .PtrType, - .ArrayType, - .ArrayTypeSentinel, - .EnumLiteral, + .array_type, + .array_type_sentinel, + .enum_literal, .MultilineStringLiteral, - .CharLiteral, - .Defer, - .Catch, - .ErrorUnion, - .MergeErrorSets, - .Range, - .Await, - .BitNot, - .Negation, - .NegationWrap, - .Resume, - .Try, - .SliceType, - .Slice, + .char_literal, + .@"defer", + .@"catch", + .error_union, + .merge_error_sets, + .range, + .@"await", + .bit_not, + .negation, + .negation_wrap, + .@"resume", + .@"try", + .slice_type, + .slice, .ArrayInitializer, .ArrayInitializerDot, .StructInitializer, .StructInitializerDot, - .Switch, - .For, - .Suspend, - .Continue, - .AnyType, - .ErrorType, + .@"switch", + .@"for", + .@"suspend", + .@"continue", + .@"anytype", + .error_type, .FnProto, - .AnyFrameType, - .ErrorSetDecl, + .anyframe_type, + .error_set_decl, .ContainerDecl, - .Comptime, - .Nosuspend, + .@"comptime", + .@"nosuspend", + .builtin_call, + .builtin_call_comma, => return mod.failNode(scope, node, "invalid left-hand side to assignment", .{}), - // @field can be assigned to - .BuiltinCall => { - const call = node.castTag(.BuiltinCall).?; - const tree = scope.tree(); - const builtin_name = tree.tokenSlice(call.builtin_token); - + // `@field` can be assigned to. + .builtin_call_two, .builtin_call_two_comma => { + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); if (!mem.eql(u8, builtin_name, "@field")) { return mod.failNode(scope, node, "invalid left-hand side to assignment", .{}); } }, // can be assigned to - .UnwrapOptional, - .Deref, - .Period, - .ArrayAccess, - .Identifier, - .GroupedExpression, - .OrElse, + .unwrap_optional, + .deref, + .period, + .array_access, + .identifier, + .grouped_expression, + .@"orelse", => {}, } return expr(mod, scope, .ref, node); @@ -202,16 +201,16 @@ fn lvalExpr(mod: *Module, scope: *Scope, node: *ast.Node) InnerError!*zir.Inst { /// When `rl` is discard, ptr, inferred_ptr, bitcasted_ptr, or inferred_ptr, the /// result instruction can be used to inspect whether it is isNoReturn() but that is it, /// it must otherwise not be used. -pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerError!*zir.Inst { +pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!*zir.Inst { switch (node.tag) { - .Root => unreachable, // Top-level declaration. - .Use => unreachable, // Top-level declaration. - .TestDecl => unreachable, // Top-level declaration. - .DocComment => unreachable, // Top-level declaration. - .VarDecl => unreachable, // Handled in `blockExpr`. - .SwitchCase => unreachable, // Handled in `switchExpr`. 
- .SwitchElse => unreachable, // Handled in `switchExpr`. - .Range => unreachable, // Handled in `switchExpr`. + .root => unreachable, // Top-level declaration. + .@"usingnamespace" => unreachable, // Top-level declaration. + .test_decl => unreachable, // Top-level declaration. + .doc_comment => unreachable, // Top-level declaration. + .var_decl => unreachable, // Handled in `blockExpr`. + .switch_case => unreachable, // Handled in `switchExpr`. + .switch_else => unreachable, // Handled in `switchExpr`. + .range => unreachable, // Handled in `switchExpr`. .Else => unreachable, // Handled explicitly the control flow expression functions. .Payload => unreachable, // Handled explicitly. .PointerPayload => unreachable, // Handled explicitly. @@ -220,114 +219,113 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .FieldInitializer => unreachable, // Handled explicitly. .ContainerField => unreachable, // Handled explicitly. - .Assign => return rvalueVoid(mod, scope, rl, node, try assign(mod, scope, node.castTag(.Assign).?)), - .AssignBitAnd => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitAnd).?, .bit_and)), - .AssignBitOr => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitOr).?, .bit_or)), - .AssignBitShiftLeft => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftLeft).?, .shl)), - .AssignBitShiftRight => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftRight).?, .shr)), - .AssignBitXor => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitXor).?, .xor)), - .AssignDiv => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignDiv).?, .div)), - .AssignSub => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSub).?, .sub)), - .AssignSubWrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSubWrap).?, .subwrap)), - .AssignMod => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMod).?, .mod_rem)), - .AssignAdd => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAdd).?, .add)), - .AssignAddWrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAddWrap).?, .addwrap)), - .AssignMul => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMul).?, .mul)), - .AssignMulWrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMulWrap).?, .mulwrap)), - - .Add => return simpleBinOp(mod, scope, rl, node.castTag(.Add).?, .add), - .AddWrap => return simpleBinOp(mod, scope, rl, node.castTag(.AddWrap).?, .addwrap), - .Sub => return simpleBinOp(mod, scope, rl, node.castTag(.Sub).?, .sub), - .SubWrap => return simpleBinOp(mod, scope, rl, node.castTag(.SubWrap).?, .subwrap), - .Mul => return simpleBinOp(mod, scope, rl, node.castTag(.Mul).?, .mul), - .MulWrap => return simpleBinOp(mod, scope, rl, node.castTag(.MulWrap).?, .mulwrap), - .Div => return simpleBinOp(mod, scope, rl, node.castTag(.Div).?, .div), - .Mod => return simpleBinOp(mod, scope, rl, node.castTag(.Mod).?, .mod_rem), - .BitAnd => return simpleBinOp(mod, scope, rl, node.castTag(.BitAnd).?, .bit_and), - .BitOr => return simpleBinOp(mod, scope, rl, node.castTag(.BitOr).?, .bit_or), - .BitShiftLeft => return simpleBinOp(mod, scope, rl, 
node.castTag(.BitShiftLeft).?, .shl), - .BitShiftRight => return simpleBinOp(mod, scope, rl, node.castTag(.BitShiftRight).?, .shr), - .BitXor => return simpleBinOp(mod, scope, rl, node.castTag(.BitXor).?, .xor), - - .BangEqual => return simpleBinOp(mod, scope, rl, node.castTag(.BangEqual).?, .cmp_neq), - .EqualEqual => return simpleBinOp(mod, scope, rl, node.castTag(.EqualEqual).?, .cmp_eq), - .GreaterThan => return simpleBinOp(mod, scope, rl, node.castTag(.GreaterThan).?, .cmp_gt), - .GreaterOrEqual => return simpleBinOp(mod, scope, rl, node.castTag(.GreaterOrEqual).?, .cmp_gte), - .LessThan => return simpleBinOp(mod, scope, rl, node.castTag(.LessThan).?, .cmp_lt), - .LessOrEqual => return simpleBinOp(mod, scope, rl, node.castTag(.LessOrEqual).?, .cmp_lte), - - .ArrayCat => return simpleBinOp(mod, scope, rl, node.castTag(.ArrayCat).?, .array_cat), - .ArrayMult => return simpleBinOp(mod, scope, rl, node.castTag(.ArrayMult).?, .array_mul), - - .BoolAnd => return boolBinOp(mod, scope, rl, node.castTag(.BoolAnd).?), - .BoolOr => return boolBinOp(mod, scope, rl, node.castTag(.BoolOr).?), - - .BoolNot => return rvalue(mod, scope, rl, try boolNot(mod, scope, node.castTag(.BoolNot).?)), - .BitNot => return rvalue(mod, scope, rl, try bitNot(mod, scope, node.castTag(.BitNot).?)), - .Negation => return rvalue(mod, scope, rl, try negation(mod, scope, node.castTag(.Negation).?, .sub)), - .NegationWrap => return rvalue(mod, scope, rl, try negation(mod, scope, node.castTag(.NegationWrap).?, .subwrap)), - - .Identifier => return try identifier(mod, scope, rl, node.castTag(.Identifier).?), - .Asm => return rvalue(mod, scope, rl, try assembly(mod, scope, node.castTag(.Asm).?)), - .StringLiteral => return rvalue(mod, scope, rl, try stringLiteral(mod, scope, node.castTag(.StringLiteral).?)), - .IntegerLiteral => return rvalue(mod, scope, rl, try integerLiteral(mod, scope, node.castTag(.IntegerLiteral).?)), - .BuiltinCall => return builtinCall(mod, scope, rl, node.castTag(.BuiltinCall).?), - .Call => return callExpr(mod, scope, rl, node.castTag(.Call).?), - .Unreachable => return unreach(mod, scope, node.castTag(.Unreachable).?), - .Return => return ret(mod, scope, node.castTag(.Return).?), - .If => return ifExpr(mod, scope, rl, node.castTag(.If).?), - .While => return whileExpr(mod, scope, rl, node.castTag(.While).?), - .Period => return field(mod, scope, rl, node.castTag(.Period).?), - .Deref => return rvalue(mod, scope, rl, try deref(mod, scope, node.castTag(.Deref).?)), - .AddressOf => return rvalue(mod, scope, rl, try addressOf(mod, scope, node.castTag(.AddressOf).?)), - .FloatLiteral => return rvalue(mod, scope, rl, try floatLiteral(mod, scope, node.castTag(.FloatLiteral).?)), - .UndefinedLiteral => return rvalue(mod, scope, rl, try undefLiteral(mod, scope, node.castTag(.UndefinedLiteral).?)), - .BoolLiteral => return rvalue(mod, scope, rl, try boolLiteral(mod, scope, node.castTag(.BoolLiteral).?)), - .NullLiteral => return rvalue(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)), - .OptionalType => return rvalue(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)), - .UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?), - .Block => return rvalueVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)), - .LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?, .block), - .Break => return rvalue(mod, scope, rl, try breakExpr(mod, scope, node.castTag(.Break).?)), - 
.Continue => return rvalue(mod, scope, rl, try continueExpr(mod, scope, node.castTag(.Continue).?)), - .PtrType => return rvalue(mod, scope, rl, try ptrType(mod, scope, node.castTag(.PtrType).?)), - .GroupedExpression => return expr(mod, scope, rl, node.castTag(.GroupedExpression).?.expr), - .ArrayType => return rvalue(mod, scope, rl, try arrayType(mod, scope, node.castTag(.ArrayType).?)), - .ArrayTypeSentinel => return rvalue(mod, scope, rl, try arrayTypeSentinel(mod, scope, node.castTag(.ArrayTypeSentinel).?)), - .EnumLiteral => return rvalue(mod, scope, rl, try enumLiteral(mod, scope, node.castTag(.EnumLiteral).?)), - .MultilineStringLiteral => return rvalue(mod, scope, rl, try multilineStrLiteral(mod, scope, node.castTag(.MultilineStringLiteral).?)), - .CharLiteral => return rvalue(mod, scope, rl, try charLiteral(mod, scope, node.castTag(.CharLiteral).?)), - .SliceType => return rvalue(mod, scope, rl, try sliceType(mod, scope, node.castTag(.SliceType).?)), - .ErrorUnion => return rvalue(mod, scope, rl, try typeInixOp(mod, scope, node.castTag(.ErrorUnion).?, .error_union_type)), - .MergeErrorSets => return rvalue(mod, scope, rl, try typeInixOp(mod, scope, node.castTag(.MergeErrorSets).?, .merge_error_sets)), - .AnyFrameType => return rvalue(mod, scope, rl, try anyFrameType(mod, scope, node.castTag(.AnyFrameType).?)), - .ErrorSetDecl => return rvalue(mod, scope, rl, try errorSetDecl(mod, scope, node.castTag(.ErrorSetDecl).?)), - .ErrorType => return rvalue(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)), - .For => return forExpr(mod, scope, rl, node.castTag(.For).?), - .ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?), - .Slice => return rvalue(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)), - .Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?), - .Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?), - .OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?), - .Switch => return switchExpr(mod, scope, rl, node.castTag(.Switch).?), - .ContainerDecl => return containerDecl(mod, scope, rl, node.castTag(.ContainerDecl).?), - - .Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}), - .Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}), - .Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}), - .Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}), + .assign => return rvalueVoid(mod, scope, rl, node, try assign(mod, scope, node)), + .assign_bit_and => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .bit_and)), + .assign_bit_or => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .bit_or)), + .assign_bit_shift_left => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .shl)), + .assign_bit_shift_right => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .shr)), + .assign_bit_xor => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .xor)), + .assign_div => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .div)), + .assign_sub => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .sub)), + .assign_sub_wrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .subwrap)), + .assign_mod => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .mod_rem)), 
+ .assign_add => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .add)), + .assign_add_wrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .addwrap)), + .assign_mul => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .mul)), + .assign_mul_wrap => return rvalueVoid(mod, scope, rl, node, try assignOp(mod, scope, node, .mulwrap)), + + .add => return simpleBinOp(mod, scope, rl, node, .add), + .add_wrap => return simpleBinOp(mod, scope, rl, node, .addwrap), + .sub => return simpleBinOp(mod, scope, rl, node, .sub), + .sub_wrap => return simpleBinOp(mod, scope, rl, node, .subwrap), + .mul => return simpleBinOp(mod, scope, rl, node, .mul), + .mul_wrap => return simpleBinOp(mod, scope, rl, node, .mulwrap), + .div => return simpleBinOp(mod, scope, rl, node, .div), + .mod => return simpleBinOp(mod, scope, rl, node, .mod_rem), + .bit_and => return simpleBinOp(mod, scope, rl, node, .bit_and), + .bit_or => return simpleBinOp(mod, scope, rl, node, .bit_or), + .bit_shift_left => return simpleBinOp(mod, scope, rl, node, .shl), + .bit_shift_right => return simpleBinOp(mod, scope, rl, node, .shr), + .bit_xor => return simpleBinOp(mod, scope, rl, node, .xor), + + .bang_equal => return simpleBinOp(mod, scope, rl, node, .cmp_neq), + .equal_equal => return simpleBinOp(mod, scope, rl, node, .cmp_eq), + .greater_than => return simpleBinOp(mod, scope, rl, node, .cmp_gt), + .greater_or_equal => return simpleBinOp(mod, scope, rl, node, .cmp_gte), + .less_than => return simpleBinOp(mod, scope, rl, node, .cmp_lt), + .less_or_equal => return simpleBinOp(mod, scope, rl, node, .cmp_lte), + + .array_cat => return simpleBinOp(mod, scope, rl, node, .array_cat), + .array_mult => return simpleBinOp(mod, scope, rl, node, .array_mul), + + .bool_and => return boolBinOp(mod, scope, rl, node), + .bool_or => return boolBinOp(mod, scope, rl, node), + + .bool_not => return rvalue(mod, scope, rl, try boolNot(mod, scope, node)), + .bit_not => return rvalue(mod, scope, rl, try bitNot(mod, scope, node)), + .negation => return rvalue(mod, scope, rl, try negation(mod, scope, node, .sub)), + .negation_wrap => return rvalue(mod, scope, rl, try negation(mod, scope, node, .subwrap)), + + .identifier => return try identifier(mod, scope, rl, node), + .@"asm" => return rvalue(mod, scope, rl, try assembly(mod, scope, node)), + .string_literal => return rvalue(mod, scope, rl, try stringLiteral(mod, scope, node)), + .integer_literal => return rvalue(mod, scope, rl, try integerLiteral(mod, scope, node)), + .builtin_call => return builtinCall(mod, scope, rl, node), + .call => return callExpr(mod, scope, rl, node), + .@"unreachable" => return unreach(mod, scope, node), + .@"return" => return ret(mod, scope, node), + .@"if" => return ifExpr(mod, scope, rl, node), + .@"while" => return whileExpr(mod, scope, rl, node), + .period => return field(mod, scope, rl, node), + .deref => return rvalue(mod, scope, rl, try deref(mod, scope, node)), + .address_of => return rvalue(mod, scope, rl, try addressOf(mod, scope, node)), + .float_literal => return rvalue(mod, scope, rl, try floatLiteral(mod, scope, node)), + .undefined_literal => return rvalue(mod, scope, rl, try undefLiteral(mod, scope, node)), + .bool_literal => return rvalue(mod, scope, rl, try boolLiteral(mod, scope, node)), + .null_literal => return rvalue(mod, scope, rl, try nullLiteral(mod, scope, node)), + .optional_type => return rvalue(mod, scope, rl, try optionalType(mod, scope, node)), + .unwrap_optional => return 
unwrapOptional(mod, scope, rl, node), + .block => return rvalueVoid(mod, scope, rl, node, try blockExpr(mod, scope, node)), + .labeled_block => return labeledBlockExpr(mod, scope, rl, node, .block), + .@"break" => return rvalue(mod, scope, rl, try breakExpr(mod, scope, node)), + .@"continue" => return rvalue(mod, scope, rl, try continueExpr(mod, scope, node)), + .grouped_expression => return expr(mod, scope, rl, node.expr), + .array_type => return rvalue(mod, scope, rl, try arrayType(mod, scope, node)), + .array_type_sentinel => return rvalue(mod, scope, rl, try arrayTypeSentinel(mod, scope, node)), + .enum_literal => return rvalue(mod, scope, rl, try enumLiteral(mod, scope, node)), + .MultilineStringLiteral => return rvalue(mod, scope, rl, try multilineStrLiteral(mod, scope, node)), + .char_literal => return rvalue(mod, scope, rl, try charLiteral(mod, scope, node)), + .slice_type => return rvalue(mod, scope, rl, try sliceType(mod, scope, node)), + .error_union => return rvalue(mod, scope, rl, try typeInixOp(mod, scope, node, .error_union_type)), + .merge_error_sets => return rvalue(mod, scope, rl, try typeInixOp(mod, scope, node, .merge_error_sets)), + .anyframe_type => return rvalue(mod, scope, rl, try anyFrameType(mod, scope, node)), + .error_set_decl => return rvalue(mod, scope, rl, try errorSetDecl(mod, scope, node)), + .error_type => return rvalue(mod, scope, rl, try errorType(mod, scope, node)), + .@"for" => return forExpr(mod, scope, rl, node), + .array_access => return arrayAccess(mod, scope, rl, node), + .slice => return rvalue(mod, scope, rl, try sliceExpr(mod, scope, node)), + .@"catch" => return catchExpr(mod, scope, rl, node), + .@"comptime" => return comptimeKeyword(mod, scope, rl, node), + .@"orelse" => return orelseExpr(mod, scope, rl, node), + .@"switch" => return switchExpr(mod, scope, rl, node), + .ContainerDecl => return containerDecl(mod, scope, rl, node), + + .@"defer" => return mod.failNode(scope, node, "TODO implement astgen.expr for .defer", .{}), + .@"await" => return mod.failNode(scope, node, "TODO implement astgen.expr for .await", .{}), + .@"resume" => return mod.failNode(scope, node, "TODO implement astgen.expr for .resume", .{}), + .@"try" => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}), .ArrayInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializer", .{}), .ArrayInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializerDot", .{}), .StructInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializer", .{}), .StructInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializerDot", .{}), - .Suspend => return mod.failNode(scope, node, "TODO implement astgen.expr for .Suspend", .{}), - .AnyType => return mod.failNode(scope, node, "TODO implement astgen.expr for .AnyType", .{}), + .@"suspend" => return mod.failNode(scope, node, "TODO implement astgen.expr for .suspend", .{}), + .@"anytype" => return mod.failNode(scope, node, "TODO implement astgen.expr for .anytype", .{}), .FnProto => return mod.failNode(scope, node, "TODO implement astgen.expr for .FnProto", .{}), - .Nosuspend => return mod.failNode(scope, node, "TODO implement astgen.expr for .Nosuspend", .{}), + .@"nosuspend" => return mod.failNode(scope, node, "TODO implement astgen.expr for .nosuspend", .{}), } } -fn comptimeKeyword(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Comptime) 
InnerError!*zir.Inst { +fn comptimeKeyword(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.@"comptime") InnerError!*zir.Inst { const tracy = trace(@src()); defer tracy.end(); @@ -338,7 +336,7 @@ pub fn comptimeExpr( mod: *Module, parent_scope: *Scope, rl: ResultLoc, - node: *ast.Node, + node: ast.Node.Index, ) InnerError!*zir.Inst { // If we are already in a comptime scope, no need to make another one. if (parent_scope.isComptime()) { @@ -347,7 +345,7 @@ pub fn comptimeExpr( // Optimization for labeled blocks: don't need to have 2 layers of blocks, // we can reuse the existing one. - if (node.castTag(.LabeledBlock)) |block_node| { + if (node.castTag(.labeled_block)) |block_node| { return labeledBlockExpr(mod, parent_scope, rl, block_node, .block_comptime); } @@ -366,6 +364,8 @@ pub fn comptimeExpr( _ = try expr(mod, &block_scope.base, rl, node); const tree = parent_scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.firstToken()].start; const block = try addZIRInstBlock(mod, parent_scope, src, .block_comptime_flat, .{ @@ -381,6 +381,8 @@ fn breakExpr( node: *ast.Node.ControlFlowExpression, ) InnerError!*zir.Inst { const tree = parent_scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.ltoken].start; // Look for the label in the scope. @@ -445,6 +447,8 @@ fn breakExpr( fn continueExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst { const tree = parent_scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.ltoken].start; // Look for the label in the scope. 
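// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: nearly every hunk in this file
// replaces `*ast.Node` pointers with plain `ast.Node.Index` handles into the
// tree's struct-of-arrays storage, fetching each column once and indexing it
// directly. For example, recovering a node's source byte offset under the new
// layout (`nodeSrcOffset` is a hypothetical helper; the diff inlines this):
const std = @import("std");
const ast = std.zig.ast;

fn nodeSrcOffset(tree: ast.Tree, node: ast.Node.Index) u32 {
    const main_tokens = tree.nodes.items(.main_token); // one MultiArrayList column
    const token_starts = tree.tokens.items(.start); // byte offsets, u32 in this layout
    return token_starts[main_tokens[node]];
}
// Optional children are encoded as node index 0 (the root can never be a
// child), which is why checks such as `var_decl.ast.align_node != 0` replace
// the old `getAlignNode()` optionals in the hunks above and below.
// ---------------------------------------------------------------------------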
@@ -485,7 +489,7 @@ fn continueExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowE } } -pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block) InnerError!void { +pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.block) InnerError!void { const tracy = trace(@src()); defer tracy.end(); @@ -502,6 +506,8 @@ fn checkLabelRedefinition(mod: *Module, parent_scope: *Scope, label: ast.TokenIn if (gen_zir.label) |prev_label| { if (try tokenIdentEql(mod, parent_scope, label, prev_label.token)) { const tree = parent_scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const label_src = tree.token_locs[label].start; const prev_label_src = tree.token_locs[prev_label.token].start; @@ -539,7 +545,7 @@ fn labeledBlockExpr( mod: *Module, parent_scope: *Scope, rl: ResultLoc, - block_node: *ast.Node.LabeledBlock, + block_node: *ast.Node.labeled_block, zir_tag: zir.Inst.Tag, ) InnerError!*zir.Inst { const tracy = trace(@src()); @@ -548,6 +554,8 @@ fn labeledBlockExpr( assert(zir_tag == .block or zir_tag == .block_comptime); const tree = parent_scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[block_node.lbrace].start; try checkLabelRedefinition(mod, parent_scope, block_node.label); @@ -627,10 +635,12 @@ fn labeledBlockExpr( fn blockExprStmts( mod: *Module, parent_scope: *Scope, - node: *ast.Node, - statements: []*ast.Node, + node: ast.Node.Index, + statements: []const ast.Node.Index, ) !void { const tree = parent_scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); var block_arena = std.heap.ArenaAllocator.init(mod.gpa); defer block_arena.deinit(); @@ -640,24 +650,24 @@ fn blockExprStmts( const src = tree.token_locs[statement.firstToken()].start; _ = try addZIRNoOp(mod, scope, src, .dbg_stmt); switch (statement.tag) { - .VarDecl => { - const var_decl_node = statement.castTag(.VarDecl).?; + .var_decl => { + const var_decl_node = statement.castTag(.var_decl).?; scope = try varDecl(mod, scope, var_decl_node, &block_arena.allocator); }, - .Assign => try assign(mod, scope, statement.castTag(.Assign).?), - .AssignBitAnd => try assignOp(mod, scope, statement.castTag(.AssignBitAnd).?, .bit_and), - .AssignBitOr => try assignOp(mod, scope, statement.castTag(.AssignBitOr).?, .bit_or), - .AssignBitShiftLeft => try assignOp(mod, scope, statement.castTag(.AssignBitShiftLeft).?, .shl), - .AssignBitShiftRight => try assignOp(mod, scope, statement.castTag(.AssignBitShiftRight).?, .shr), - .AssignBitXor => try assignOp(mod, scope, statement.castTag(.AssignBitXor).?, .xor), - .AssignDiv => try assignOp(mod, scope, statement.castTag(.AssignDiv).?, .div), - .AssignSub => try assignOp(mod, scope, statement.castTag(.AssignSub).?, .sub), - .AssignSubWrap => try assignOp(mod, scope, statement.castTag(.AssignSubWrap).?, .subwrap), - .AssignMod => try assignOp(mod, scope, statement.castTag(.AssignMod).?, .mod_rem), - .AssignAdd => try assignOp(mod, scope, statement.castTag(.AssignAdd).?, .add), - .AssignAddWrap => try assignOp(mod, scope, statement.castTag(.AssignAddWrap).?, .addwrap), - .AssignMul => try assignOp(mod, scope, statement.castTag(.AssignMul).?, .mul), - .AssignMulWrap => try assignOp(mod, scope, statement.castTag(.AssignMulWrap).?, .mulwrap), + .assign => try assign(mod, scope, statement), + .assign_bit_and => try assignOp(mod, scope, statement, 
.bit_and), + .assign_bit_or => try assignOp(mod, scope, statement, .bit_or), + .assign_bit_shift_left => try assignOp(mod, scope, statement, .shl), + .assign_bit_shift_right => try assignOp(mod, scope, statement, .shr), + .assign_bit_xor => try assignOp(mod, scope, statement, .xor), + .assign_div => try assignOp(mod, scope, statement, .div), + .assign_sub => try assignOp(mod, scope, statement, .sub), + .assign_sub_wrap => try assignOp(mod, scope, statement, .subwrap), + .assign_mod => try assignOp(mod, scope, statement, .mod_rem), + .assign_add => try assignOp(mod, scope, statement, .add), + .assign_add_wrap => try assignOp(mod, scope, statement, .addwrap), + .assign_mul => try assignOp(mod, scope, statement, .mul), + .assign_mul_wrap => try assignOp(mod, scope, statement, .mulwrap), else => { const possibly_unused_result = try expr(mod, scope, .none, statement); @@ -672,7 +682,7 @@ fn blockExprStmts( fn varDecl( mod: *Module, scope: *Scope, - node: *ast.Node.VarDecl, + node: *ast.Node.var_decl, block_arena: *Allocator, ) InnerError!*Scope { if (node.getComptimeToken()) |comptime_token| { @@ -682,6 +692,8 @@ fn varDecl( return mod.failNode(scope, align_node, "TODO implement alignment on locals", .{}); } const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const name_src = tree.token_locs[node.name_token].start; const ident_name = try mod.identifierTokenString(scope, node.name_token); @@ -733,7 +745,7 @@ fn varDecl( return mod.fail(scope, name_src, "variables must be initialized", .{}); switch (tree.token_ids[node.mut_token]) { - .Keyword_const => { + .keyword_const => { // Depending on the type of AST the initialization expression is, we may need an lvalue // or an rvalue as a result location. If it is an rvalue, we can use the instruction as // the variable, no memory location needed. @@ -834,7 +846,7 @@ fn varDecl( }; return &sub_scope.base; }, - .Keyword_var => { + .keyword_var => { var resolve_inferred_alloc: ?*zir.Inst = null; const var_data: struct { result_loc: ResultLoc, alloc: *zir.Inst } = if (node.getTypeNode()) |type_node| a: { const type_inst = try typeExpr(mod, scope, type_node); @@ -862,33 +874,39 @@ fn varDecl( } } -fn assign(mod: *Module, scope: *Scope, infix_node: *ast.Node.SimpleInfixOp) InnerError!void { - if (infix_node.lhs.castTag(.Identifier)) |ident| { - // This intentionally does not support @"_" syntax. - const ident_name = scope.tree().tokenSlice(ident.token); +fn assign(mod: *Module, scope: *Scope, infix_node: ast.Node.Index) InnerError!void { + const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const lhs = node_datas[infix_node].lhs; + const rhs = node_datas[infix_node].rhs; + if (node_tags[lhs] == .identifier) { + // This intentionally does not support `@"_"` syntax. 
+ const ident_name = tree.tokenSlice(main_tokens[lhs]); if (mem.eql(u8, ident_name, "_")) { _ = try expr(mod, scope, .discard, infix_node.rhs); return; } } - const lvalue = try lvalExpr(mod, scope, infix_node.lhs); - _ = try expr(mod, scope, .{ .ptr = lvalue }, infix_node.rhs); + const lvalue = try lvalExpr(mod, scope, lhs); + _ = try expr(mod, scope, .{ .ptr = lvalue }, rhs); } fn assignOp( mod: *Module, scope: *Scope, - infix_node: *ast.Node.SimpleInfixOp, + infix_node: ast.Node.Index, op_inst_tag: zir.Inst.Tag, ) InnerError!void { - const lhs_ptr = try lvalExpr(mod, scope, infix_node.lhs); - const lhs = try addZIRUnOp(mod, scope, lhs_ptr.src, .deref, lhs_ptr); - const lhs_type = try addZIRUnOp(mod, scope, lhs_ptr.src, .typeof, lhs); - const rhs = try expr(mod, scope, .{ .ty = lhs_type }, infix_node.rhs); - const tree = scope.tree(); - const src = tree.token_locs[infix_node.op_token].start; + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const lhs_ptr = try lvalExpr(mod, scope, node_datas[infix_node].lhs); + const lhs = try addZIRUnOp(mod, scope, lhs_ptr.src, .deref, lhs_ptr); + const lhs_type = try addZIRUnOp(mod, scope, lhs_ptr.src, .typeof, lhs); + const rhs = try expr(mod, scope, .{ .ty = lhs_type }, node_datas[infix_node].rhs); + const src = token_starts[main_tokens[infix_node]]; const result = try addZIRBinOp(mod, scope, src, op_inst_tag, lhs, rhs); _ = try addZIRBinOp(mod, scope, src, .store, lhs_ptr, result); } @@ -935,7 +953,7 @@ fn optionalType(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) Inn return addZIRUnOp(mod, scope, src, .optional_type, operand); } -fn sliceType(mod: *Module, scope: *Scope, node: *ast.Node.SliceType) InnerError!*zir.Inst { +fn sliceType(mod: *Module, scope: *Scope, node: *ast.Node.slice_type) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.op_token].start; return ptrSliceType(mod, scope, src, &node.ptr_info, node.rhs, .Slice); @@ -948,7 +966,7 @@ fn ptrType(mod: *Module, scope: *Scope, node: *ast.Node.PtrType) InnerError!*zir .Asterisk, .AsteriskAsterisk => .One, // TODO stage1 type inference bug .LBracket => @as(std.builtin.TypeInfo.Pointer.Size, switch (tree.token_ids[node.op_token + 2]) { - .Identifier => .C, + .identifier => .C, else => .Many, }), else => unreachable, @@ -998,7 +1016,7 @@ fn ptrSliceType(mod: *Module, scope: *Scope, src: usize, ptr_info: *ast.PtrInfo, return addZIRInst(mod, scope, src, zir.Inst.PtrType, .{ .child_type = child_type }, kw_args); } -fn arrayType(mod: *Module, scope: *Scope, node: *ast.Node.ArrayType) !*zir.Inst { +fn arrayType(mod: *Module, scope: *Scope, node: *ast.Node.array_type) !*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.op_token].start; const usize_type = try addZIRInstConst(mod, scope, src, .{ @@ -1013,7 +1031,7 @@ fn arrayType(mod: *Module, scope: *Scope, node: *ast.Node.ArrayType) !*zir.Inst return addZIRBinOp(mod, scope, src, .array_type, len, elem_type); } -fn arrayTypeSentinel(mod: *Module, scope: *Scope, node: *ast.Node.ArrayTypeSentinel) !*zir.Inst { +fn arrayTypeSentinel(mod: *Module, scope: *Scope, node: *ast.Node.array_type_sentinel) !*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.op_token].start; const usize_type = try addZIRInstConst(mod, scope, src, .{ @@ -1034,7 +1052,7 @@ fn arrayTypeSentinel(mod: *Module, scope: *Scope, node: *ast.Node.ArrayTypeSenti }, .{}); } -fn anyFrameType(mod: *Module, scope: *Scope, node: *ast.Node.AnyFrameType) 
InnerError!*zir.Inst { +fn anyFrameType(mod: *Module, scope: *Scope, node: *ast.Node.anyframe_type) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.anyframe_token].start; if (node.result) |some| { @@ -1056,7 +1074,7 @@ fn typeInixOp(mod: *Module, scope: *Scope, node: *ast.Node.SimpleInfixOp, op_ins return addZIRBinOp(mod, scope, src, op_inst_tag, error_set, payload); } -fn enumLiteral(mod: *Module, scope: *Scope, node: *ast.Node.EnumLiteral) !*zir.Inst { +fn enumLiteral(mod: *Module, scope: *Scope, node: *ast.Node.enum_literal) !*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.name].start; const name = try mod.identifierTokenString(scope, node.name); @@ -1141,13 +1159,13 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con var layout: std.builtin.TypeInfo.ContainerLayout = .Auto; if (node.layout_token) |some| switch (tree.token_ids[some]) { - .Keyword_extern => layout = .Extern, - .Keyword_packed => layout = .Packed, + .keyword_extern => layout = .Extern, + .keyword_packed => layout = .Packed, else => unreachable, }; const container_type = switch (tree.token_ids[node.kind_token]) { - .Keyword_enum => blk: { + .keyword_enum => blk: { const tag_type: ?*zir.Inst = switch (node.init_arg_expr) { .Type => |t| try typeExpr(mod, &gen_scope.base, t), .None => null, @@ -1174,7 +1192,7 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con }; break :blk Type.initPayload(&enum_type.base); }, - .Keyword_struct => blk: { + .keyword_struct => blk: { assert(node.init_arg_expr == .None); const inst = try addZIRInst(mod, &gen_scope.base, src, zir.Inst.StructType, .{ .fields = try arena.dupe(*zir.Inst, fields.items), @@ -1196,7 +1214,7 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con }; break :blk Type.initPayload(&struct_type.base); }, - .Keyword_union => blk: { + .keyword_union => blk: { const init_inst = switch (node.init_arg_expr) { .Enum => |e| if (e) |t| try typeExpr(mod, &gen_scope.base, t) else null, .None => null, @@ -1229,7 +1247,7 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con }; break :blk Type.initPayload(&union_type.base); }, - .Keyword_opaque => blk: { + .keyword_opaque => blk: { if (fields.items.len > 0) { return mod.fail(scope, fields.items[0].src, "opaque types cannot have fields", .{}); } @@ -1258,7 +1276,7 @@ fn containerDecl(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Con } } -fn errorSetDecl(mod: *Module, scope: *Scope, node: *ast.Node.ErrorSetDecl) InnerError!*zir.Inst { +fn errorSetDecl(mod: *Module, scope: *Scope, node: *ast.Node.error_set_decl) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.error_token].start; const decls = node.decls(); @@ -1281,7 +1299,7 @@ fn errorType(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!* }); } -fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) InnerError!*zir.Inst { +fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.@"catch") InnerError!*zir.Inst { switch (rl) { .ref => return orelseCatchExpr( mod, @@ -1528,7 +1546,7 @@ pub fn field(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleI const tree = scope.tree(); const src = tree.token_locs[node.op_token].start; // TODO custom AST node for field access so that we don't have to go through a node cast here - const field_name = try mod.identifierTokenString(scope, 
node.rhs.castTag(.Identifier).?.token); + const field_name = try mod.identifierTokenString(scope, node.rhs.castTag(.identifier).?.token); if (rl == .ref) { return addZirInstTag(mod, scope, src, .field_ptr, .{ .object = try expr(mod, scope, .ref, node.lhs), @@ -1545,7 +1563,7 @@ fn namedField( mod: *Module, scope: *Scope, rl: ResultLoc, - call: *ast.Node.BuiltinCall, + call: *ast.Node.builtin_call, ) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 2); @@ -1571,7 +1589,7 @@ fn namedField( })); } -fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.ArrayAccess) InnerError!*zir.Inst { +fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.array_access) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.rtoken].start; const usize_type = try addZIRInstConst(mod, scope, src, .{ @@ -1592,7 +1610,7 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array })); } -fn sliceExpr(mod: *Module, scope: *Scope, node: *ast.Node.Slice) InnerError!*zir.Inst { +fn sliceExpr(mod: *Module, scope: *Scope, node: *ast.Node.slice) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.rtoken].start; @@ -1633,15 +1651,16 @@ fn simpleBinOp( mod: *Module, scope: *Scope, rl: ResultLoc, - infix_node: *ast.Node.SimpleInfixOp, + infix_node: ast.Node.Index, op_inst_tag: zir.Inst.Tag, ) InnerError!*zir.Inst { const tree = scope.tree(); - const src = tree.token_locs[infix_node.op_token].start; - - const lhs = try expr(mod, scope, .none, infix_node.lhs); - const rhs = try expr(mod, scope, .none, infix_node.rhs); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const lhs = try expr(mod, scope, .none, node_datas[infix_node].lhs); + const rhs = try expr(mod, scope, .none, node_datas[infix_node].rhs); + const src = token_starts[main_tokens[infix_node]]; const result = try addZIRBinOp(mod, scope, src, op_inst_tag, lhs, rhs); return rvalue(mod, scope, rl, result); } @@ -1653,6 +1672,9 @@ fn boolBinOp( infix_node: *ast.Node.SimpleInfixOp, ) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const src = tree.token_locs[infix_node.op_token].start; const bool_type = try addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.type), @@ -1703,7 +1725,7 @@ fn boolBinOp( }; defer const_scope.instructions.deinit(mod.gpa); - const is_bool_and = infix_node.base.tag == .BoolAnd; + const is_bool_and = infix_node.base.tag == .bool_and; _ = try addZIRInst(mod, &const_scope.base, src, zir.Inst.Break, .{ .block = block, .operand = try addZIRInstConst(mod, &const_scope.base, src, .{ @@ -1769,7 +1791,7 @@ const CondKind = union(enum) { return &then_scope.base; }; const is_ptr = payload.ptr_token != null; - const ident_node = payload.value_symbol.castTag(.Identifier).?; + const ident_node = payload.value_symbol.castTag(.identifier).?; // This intentionally does not support @"_" syntax. const ident_name = then_scope.base.tree().tokenSlice(ident_node.token); @@ -1788,7 +1810,7 @@ const CondKind = union(enum) { const payload_ptr = try addZIRUnOp(mod, &else_scope.base, src, .err_union_payload_unsafe_ptr, self.err_union.?); const payload = payload_node.?.castTag(.Payload).?; - const ident_node = payload.error_symbol.castTag(.Identifier).?; + const ident_node = payload.error_symbol.castTag(.identifier).?; // This intentionally does not support @"_" syntax. 
const ident_name = else_scope.base.tree().tokenSlice(ident_node.token); @@ -1800,7 +1822,7 @@ const CondKind = union(enum) { } }; -fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) InnerError!*zir.Inst { +fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.@"if") InnerError!*zir.Inst { var cond_kind: CondKind = .bool; if (if_node.payload) |_| cond_kind = .{ .optional = null }; if (if_node.@"else") |else_node| { @@ -1819,6 +1841,8 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn defer block_scope.instructions.deinit(mod.gpa); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const if_src = tree.token_locs[if_node.if_token].start; const cond = try cond_kind.cond(mod, &block_scope, if_src, if_node.condition); @@ -1918,7 +1942,7 @@ fn whileExpr( mod: *Module, scope: *Scope, rl: ResultLoc, - while_node: *ast.Node.While, + while_node: *ast.Node.@"while", ) InnerError!*zir.Inst { var cond_kind: CondKind = .bool; if (while_node.payload) |_| cond_kind = .{ .optional = null }; @@ -1955,6 +1979,8 @@ fn whileExpr( defer continue_scope.instructions.deinit(mod.gpa); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const while_src = tree.token_locs[while_node.while_token].start; const void_type = try addZIRInstConst(mod, scope, while_src, .{ .ty = Type.initTag(.type), @@ -2066,7 +2092,7 @@ fn forExpr( mod: *Module, scope: *Scope, rl: ResultLoc, - for_node: *ast.Node.For, + for_node: *ast.Node.@"for", ) InnerError!*zir.Inst { if (for_node.label) |label| { try checkLabelRedefinition(mod, scope, label); @@ -2077,6 +2103,8 @@ fn forExpr( // setup variables and constants const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const for_src = tree.token_locs[for_node.for_token].start; const index_ptr = blk: { const usize_type = try addZIRInstConst(mod, scope, for_src, .{ @@ -2246,9 +2274,9 @@ fn forExpr( ); } -fn switchCaseUsesRef(node: *ast.Node.Switch) bool { +fn switchCaseUsesRef(node: *ast.Node.@"switch") bool { for (node.cases()) |uncasted_case| { - const case = uncasted_case.castTag(.SwitchCase).?; + const case = uncasted_case.castTag(.switch_case).?; const uncasted_payload = case.payload orelse continue; const payload = uncasted_payload.castTag(.PointerPayload).?; if (payload.ptr_token) |_| return true; @@ -2260,15 +2288,17 @@ fn getRangeNode(node: *ast.Node) ?*ast.Node.SimpleInfixOp { var cur = node; while (true) { switch (cur.tag) { - .Range => return @fieldParentPtr(ast.Node.SimpleInfixOp, "base", cur), - .GroupedExpression => cur = @fieldParentPtr(ast.Node.GroupedExpression, "base", cur).expr, + .range => return @fieldParentPtr(ast.Node.SimpleInfixOp, "base", cur), + .grouped_expression => cur = @fieldParentPtr(ast.Node.grouped_expression, "base", cur).expr, else => return null, } } } -fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node.Switch) InnerError!*zir.Inst { +fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node.@"switch") InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const switch_src = tree.token_locs[switch_node.switch_token].start; const use_ref = switchCaseUsesRef(switch_node); @@ -2291,12 +2321,12 @@ fn switchExpr(mod: *Module, 
scope: *Scope, rl: ResultLoc, switch_node: *ast.Node var first_range: ?*zir.Inst = null; var simple_case_count: usize = 0; for (switch_node.cases()) |uncasted_case| { - const case = uncasted_case.castTag(.SwitchCase).?; + const case = uncasted_case.castTag(.switch_case).?; const case_src = tree.token_locs[case.firstToken()].start; assert(case.items_len != 0); // Check for else/_ prong, those are handled last. - if (case.items_len == 1 and case.items()[0].tag == .SwitchElse) { + if (case.items_len == 1 and case.items()[0].tag == .switch_else) { if (else_src) |src| { const msg = msg: { const msg = try mod.errMsg( @@ -2313,7 +2343,7 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node } else_src = case_src; continue; - } else if (case.items_len == 1 and case.items()[0].tag == .Identifier and + } else if (case.items_len == 1 and case.items()[0].tag == .identifier and mem.eql(u8, tree.tokenSlice(case.items()[0].firstToken()), "_")) { if (underscore_src) |src| { @@ -2412,20 +2442,20 @@ fn switchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, switch_node: *ast.Node defer else_scope.instructions.deinit(mod.gpa); // Now generate all but the special cases - var special_case: ?*ast.Node.SwitchCase = null; + var special_case: ?*ast.Node.switch_case = null; var items_index: usize = 0; var case_index: usize = 0; for (switch_node.cases()) |uncasted_case| { - const case = uncasted_case.castTag(.SwitchCase).?; + const case = uncasted_case.castTag(.switch_case).?; const case_src = tree.token_locs[case.firstToken()].start; // reset without freeing to reduce allocations. case_scope.instructions.items.len = 0; // Check for else/_ prong, those are handled last. - if (case.items_len == 1 and case.items()[0].tag == .SwitchElse) { + if (case.items_len == 1 and case.items()[0].tag == .switch_else) { special_case = case; continue; - } else if (case.items_len == 1 and case.items()[0].tag == .Identifier and + } else if (case.items_len == 1 and case.items()[0].tag == .identifier and mem.eql(u8, tree.tokenSlice(case.items()[0].firstToken()), "_")) { special_case = case; @@ -2528,11 +2558,13 @@ fn switchCaseExpr( scope: *Scope, rl: ResultLoc, block: *zir.Inst.Block, - case: *ast.Node.SwitchCase, + case: *ast.Node.switch_case, target: *zir.Inst, target_ptr: ?*zir.Inst, ) !void { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const case_src = tree.token_locs[case.firstToken()].start; const sub_scope = blk: { const uncasted_payload = case.payload orelse break :blk scope; @@ -2559,6 +2591,8 @@ fn switchCaseExpr( fn ret(mod: *Module, scope: *Scope, cfe: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[cfe.ltoken].start; if (cfe.getRHS()) |rhs_node| { if (nodeMayNeedMemoryLocation(rhs_node, scope)) { @@ -2580,6 +2614,8 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo defer tracy.end(); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const ident_name = try mod.identifierTokenString(scope, ident.token); const src = tree.token_locs[ident.token].start; if (mem.eql(u8, ident_name, "_")) { @@ -2667,6 +2703,8 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo fn stringLiteral(mod: *Module, scope: *Scope, str_lit: 
*ast.Node.OneToken) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const unparsed_bytes = tree.tokenSlice(str_lit.token); const arena = scope.arena(); @@ -2686,6 +2724,8 @@ fn stringLiteral(mod: *Module, scope: *Scope, str_lit: *ast.Node.OneToken) Inner fn multilineStrLiteral(mod: *Module, scope: *Scope, node: *ast.Node.MultilineStringLiteral) !*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const lines = node.linesConst(); const src = tree.token_locs[lines[0]].start; @@ -2713,6 +2753,8 @@ fn multilineStrLiteral(mod: *Module, scope: *Scope, node: *ast.Node.MultilineStr fn charLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) !*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.token].start; const slice = tree.tokenSlice(node.token); @@ -2733,6 +2775,8 @@ fn charLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) !*zir.Inst fn integerLiteral(mod: *Module, scope: *Scope, int_lit: *ast.Node.OneToken) InnerError!*zir.Inst { const arena = scope.arena(); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const prefixed_bytes = tree.tokenSlice(int_lit.token); const base = if (mem.startsWith(u8, prefixed_bytes, "0x")) 16 @@ -2762,6 +2806,8 @@ fn integerLiteral(mod: *Module, scope: *Scope, int_lit: *ast.Node.OneToken) Inne fn floatLiteral(mod: *Module, scope: *Scope, float_lit: *ast.Node.OneToken) InnerError!*zir.Inst { const arena = scope.arena(); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const bytes = tree.tokenSlice(float_lit.token); if (bytes.len > 2 and bytes[1] == 'x') { return mod.failTok(scope, float_lit.token, "TODO hex floats", .{}); @@ -2780,6 +2826,8 @@ fn floatLiteral(mod: *Module, scope: *Scope, float_lit: *ast.Node.OneToken) Inne fn undefLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!*zir.Inst { const arena = scope.arena(); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.token].start; return addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.@"undefined"), @@ -2790,12 +2838,14 @@ fn undefLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerErro fn boolLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!*zir.Inst { const arena = scope.arena(); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.token].start; return addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.bool), .val = switch (tree.token_ids[node.token]) { - .Keyword_true => Value.initTag(.bool_true), - .Keyword_false => Value.initTag(.bool_false), + .keyword_true => Value.initTag(.bool_true), + .keyword_false => Value.initTag(.bool_false), else => unreachable, }, }); @@ -2804,6 +2854,8 @@ fn boolLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError fn nullLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!*zir.Inst { const arena = scope.arena(); const tree = scope.tree(); + const node_datas = 
tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[node.token].start; return addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.@"null"), @@ -2811,12 +2863,14 @@ fn nullLiteral(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError }); } -fn assembly(mod: *Module, scope: *Scope, asm_node: *ast.Node.Asm) InnerError!*zir.Inst { +fn assembly(mod: *Module, scope: *Scope, asm_node: *ast.Node.@"asm") InnerError!*zir.Inst { if (asm_node.outputs.len != 0) { return mod.failNode(scope, &asm_node.base, "TODO implement asm with an output", .{}); } const arena = scope.arena(); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const inputs = try arena.alloc(*zir.Inst, asm_node.inputs.len); const args = try arena.alloc(*zir.Inst, asm_node.inputs.len); @@ -2839,7 +2893,7 @@ fn assembly(mod: *Module, scope: *Scope, asm_node: *ast.Node.Asm) InnerError!*zi .ty = Type.initTag(.type), .val = Value.initTag(.void_type), }); - const asm_inst = try addZIRInst(mod, scope, src, zir.Inst.Asm, .{ + const asm_inst = try addZIRInst(mod, scope, src, zir.Inst.@"asm", .{ .asm_source = try expr(mod, scope, str_type_rl, asm_node.template), .return_type = return_type, }, .{ @@ -2851,7 +2905,7 @@ fn assembly(mod: *Module, scope: *Scope, asm_node: *ast.Node.Asm) InnerError!*zi return asm_inst; } -fn ensureBuiltinParamCount(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall, count: u32) !void { +fn ensureBuiltinParamCount(mod: *Module, scope: *Scope, call: *ast.Node.builtin_call, count: u32) !void { if (call.params_len == count) return; @@ -2863,11 +2917,13 @@ fn simpleCast( mod: *Module, scope: *Scope, rl: ResultLoc, - call: *ast.Node.BuiltinCall, + call: *ast.Node.builtin_call, inst_tag: zir.Inst.Tag, ) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 2); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const dest_type = try typeExpr(mod, scope, params[0]); @@ -2876,10 +2932,12 @@ fn simpleCast( return rvalue(mod, scope, rl, result); } -fn ptrToInt(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn ptrToInt(mod: *Module, scope: *Scope, call: *ast.Node.builtin_call) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 1); const operand = try expr(mod, scope, .none, call.params()[0]); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; return addZIRUnOp(mod, scope, src, .ptrtoint, operand); } @@ -2888,10 +2946,12 @@ fn as( mod: *Module, scope: *Scope, rl: ResultLoc, - call: *ast.Node.BuiltinCall, + call: *ast.Node.builtin_call, ) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 2); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const dest_type = try typeExpr(mod, scope, params[0]); @@ -2963,9 +3023,11 @@ fn asRlPtr( } } -fn bitCast(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn bitCast(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.builtin_call) InnerError!*zir.Inst { try 
ensureBuiltinParamCount(mod, scope, call, 2); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const dest_type = try typeExpr(mod, scope, params[0]); @@ -3007,27 +3069,33 @@ fn bitCast(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCa } } -fn import(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn import(mod: *Module, scope: *Scope, call: *ast.Node.builtin_call) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 1); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const target = try expr(mod, scope, .none, params[0]); return addZIRUnOp(mod, scope, src, .import, target); } -fn compileError(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn compileError(mod: *Module, scope: *Scope, call: *ast.Node.builtin_call) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 1); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const target = try expr(mod, scope, .none, params[0]); return addZIRUnOp(mod, scope, src, .compile_error, target); } -fn setEvalBranchQuota(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn setEvalBranchQuota(mod: *Module, scope: *Scope, call: *ast.Node.builtin_call) InnerError!*zir.Inst { try ensureBuiltinParamCount(mod, scope, call, 1); const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); const u32_type = try addZIRInstConst(mod, scope, src, .{ @@ -3038,8 +3106,10 @@ fn setEvalBranchQuota(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) return addZIRUnOp(mod, scope, src, .set_eval_branch_quota, quota); } -fn typeOf(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn typeOf(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.builtin_call) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const arena = scope.arena(); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); @@ -3054,8 +3124,10 @@ fn typeOf(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCal items[param_i] = try expr(mod, scope, .none, param); return rvalue(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.TypeOfPeer, .{ .items = items }, .{})); } -fn compileLog(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn compileLog(mod: *Module, scope: *Scope, call: *ast.Node.builtin_call) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const arena = scope.arena(); const src = tree.token_locs[call.builtin_token].start; const params = call.params(); @@ -3065,8 +3137,10 @@ fn compileLog(mod: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerErr return addZIRInst(mod, scope, src, 
zir.Inst.CompileLog, .{ .to_log = targets }, .{}); } -fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { +fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.builtin_call) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const builtin_name = tree.tokenSlice(call.builtin_token); // We handle the different builtins manually because they have different semantics depending @@ -3104,8 +3178,10 @@ fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.Built } } -fn callExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Call) InnerError!*zir.Inst { +fn callExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.call) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const lhs = try expr(mod, scope, .none, node.lhs); const param_nodes = node.params(); @@ -3130,6 +3206,8 @@ fn callExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Call) In fn unreach(mod: *Module, scope: *Scope, unreach_node: *ast.Node.OneToken) InnerError!*zir.Inst { const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); const src = tree.token_locs[unreach_node.token].start; return addZIRNoOp(mod, scope, src, .unreachable_safe); } @@ -3176,11 +3254,11 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node, scope: *Scope) bool { while (true) { switch (node.tag) { .Root, - .Use, - .TestDecl, - .DocComment, - .SwitchCase, - .SwitchElse, + .@"usingnamespace", + .test_decl, + .doc_comment, + .switch_case, + .switch_else, .Else, .Payload, .PointerPayload, @@ -3190,97 +3268,97 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node, scope: *Scope) bool { .FieldInitializer, => unreachable, - .Return, - .Break, - .Continue, - .BitNot, - .BoolNot, - .VarDecl, - .Defer, - .AddressOf, - .OptionalType, - .Negation, - .NegationWrap, - .Resume, - .ArrayType, - .ArrayTypeSentinel, + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .var_decl, + .@"defer", + .address_of, + .optional_type, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .array_type_sentinel, .PtrType, - .SliceType, - .Suspend, - .AnyType, - .ErrorType, + .slice_type, + .@"suspend", + .@"anytype", + .error_type, .FnProto, - .AnyFrameType, - .IntegerLiteral, - .FloatLiteral, - .EnumLiteral, - .StringLiteral, + .anyframe_type, + .integer_literal, + .float_literal, + .enum_literal, + .string_literal, .MultilineStringLiteral, - .CharLiteral, - .BoolLiteral, - .NullLiteral, - .UndefinedLiteral, - .Unreachable, - .Identifier, - .ErrorSetDecl, + .char_literal, + .bool_literal, + .null_literal, + .undefined_literal, + .@"unreachable", + .identifier, + .error_set_decl, .ContainerDecl, - .Asm, - .Add, - .AddWrap, - .ArrayCat, - .ArrayMult, - .Assign, - .AssignBitAnd, - .AssignBitOr, - .AssignBitShiftLeft, - .AssignBitShiftRight, - .AssignBitXor, - .AssignDiv, - .AssignSub, - .AssignSubWrap, - .AssignMod, - .AssignAdd, - .AssignAddWrap, - .AssignMul, - .AssignMulWrap, - .BangEqual, - .BitAnd, - .BitOr, - .BitShiftLeft, - .BitShiftRight, - .BitXor, - .BoolAnd, - .BoolOr, - .Div, - .EqualEqual, - .ErrorUnion, - .GreaterOrEqual, - .GreaterThan, - .LessOrEqual, - .LessThan, - .MergeErrorSets, - .Mod, - .Mul, - .MulWrap, - .Range, - .Period, - .Sub, - .SubWrap, - 
.Slice, - .Deref, - .ArrayAccess, - .Block, + .@"asm", + .add, + .add_wrap, + .array_cat, + .array_mult, + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_bit_shift_left, + .assign_bit_shift_right, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_mul, + .assign_mul_wrap, + .bang_equal, + .bit_and, + .bit_or, + .bit_shift_left, + .bit_shift_right, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .range, + .period, + .sub, + .sub_wrap, + .slice, + .deref, + .array_access, + .block, => return false, // Forward the question to a sub-expression. - .GroupedExpression => node = node.castTag(.GroupedExpression).?.expr, - .Try => node = node.castTag(.Try).?.rhs, - .Await => node = node.castTag(.Await).?.rhs, - .Catch => node = node.castTag(.Catch).?.rhs, - .OrElse => node = node.castTag(.OrElse).?.rhs, - .Comptime => node = node.castTag(.Comptime).?.expr, - .Nosuspend => node = node.castTag(.Nosuspend).?.expr, - .UnwrapOptional => node = node.castTag(.UnwrapOptional).?.lhs, + .grouped_expression => node = node.castTag(.grouped_expression).?.expr, + .@"try" => node = node.castTag(.@"try").?.rhs, + .@"await" => node = node.castTag(.@"await").?.rhs, + .@"catch" => node = node.castTag(.@"catch").?.rhs, + .@"orelse" => node = node.castTag(.@"orelse").?.rhs, + .@"comptime" => node = node.castTag(.@"comptime").?.expr, + .@"nosuspend" => node = node.castTag(.@"nosuspend").?.expr, + .unwrap_optional => node = node.castTag(.unwrap_optional).?.lhs, // True because these are exactly the expressions we need memory locations for. .ArrayInitializer, @@ -3291,14 +3369,14 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node, scope: *Scope) bool { // True because depending on comptime conditions, sub-expressions // may be the kind that need memory locations. - .While, - .For, - .Switch, - .Call, - .LabeledBlock, + .@"while", + .@"for", + .@"switch", + .call, + .labeled_block, => return true, - .BuiltinCall => { + .builtin_call => { @setEvalBranchQuota(5000); const builtin_needs_mem_loc = std.ComptimeStringMap(bool, .{ .{ "@addWithOverflow", false }, @@ -3404,12 +3482,12 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node, scope: *Scope) bool { .{ "@TypeOf", false }, .{ "@unionInit", true }, }); - const name = scope.tree().tokenSlice(node.castTag(.BuiltinCall).?.builtin_token); + const name = scope.tree().tokenSlice(node.castTag(.builtin_call).?.builtin_token); return builtin_needs_mem_loc.get(name).?; }, // Depending on AST properties, they may need memory locations. 
- .If => return node.castTag(.If).?.@"else" != null, + .@"if" => return node.castTag(.@"if").?.@"else" != null, } } } @@ -3450,8 +3528,17 @@ fn rvalue(mod: *Module, scope: *Scope, rl: ResultLoc, result: *zir.Inst) InnerEr } } -fn rvalueVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, result: void) InnerError!*zir.Inst { - const src = scope.tree().token_locs[node.firstToken()].start; +fn rvalueVoid( + mod: *Module, + scope: *Scope, + rl: ResultLoc, + node: ast.Node.Index, + result: void, +) InnerError!*zir.Inst { + const tree = scope.tree(); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const src = tree.tokens.items(.start)[tree.firstToken(node)]; const void_inst = try addZIRInstConst(mod, scope, src, .{ .ty = Type.initTag(.void), .val = Value.initTag(.void_value), diff --git a/src/codegen.zig b/src/codegen.zig index 9771386403..095bb123ba 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -451,11 +451,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const src_data: struct { lbrace_src: usize, rbrace_src: usize, source: []const u8 } = blk: { const container_scope = module_fn.owner_decl.container; - const tree = container_scope.file_scope.contents.tree; - const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const lbrace_src = tree.token_locs[block.lbrace].start; - const rbrace_src = tree.token_locs[block.rbrace].start; + const tree = container_scope.file_scope.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + + const fn_decl = tree.rootDecls()[module_fn.owner_decl.src_index]; + assert(node_tags[fn_decl] == .fn_decl); + const block = node_datas[fn_decl].rhs; + const lbrace_src = token_starts[tree.firstToken(block)]; + const rbrace_src = token_starts[tree.lastToken(block)]; break :blk .{ .lbrace_src = lbrace_src, .rbrace_src = rbrace_src, diff --git a/src/ir.zig b/src/ir.zig index 0e83dbfd56..a0b33fba73 100644 --- a/src/ir.zig +++ b/src/ir.zig @@ -317,6 +317,7 @@ pub const Inst = struct { pub const base_tag = Tag.arg; base: Inst, + /// This exists to be emitted into debug info. name: [*:0]const u8, pub fn operandCount(self: *const Arg) usize { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 18f3f57712..f92c585cd5 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2223,13 +2223,19 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { try dbg_line_buffer.ensureCapacity(26); const line_off: u28 = blk: { - const tree = decl.container.file_scope.contents.tree; - const file_ast_decls = tree.root_node.decls(); + const tree = decl.container.file_scope.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + + const file_ast_decls = tree.rootDecls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. 
- const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); + const fn_decl = file_ast_decls[decl.src_index]; + assert(node_tags[fn_decl] == .fn_decl); + const block = node_datas[fn_decl].rhs; + const lbrace = tree.firstToken(block); + const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]); break :blk @intCast(u28, line_delta); }; @@ -2744,13 +2750,19 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec if (self.llvm_ir_module) |_| return; - const tree = decl.container.file_scope.contents.tree; - const file_ast_decls = tree.root_node.decls(); + const tree = decl.container.file_scope.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + + const file_ast_decls = tree.rootDecls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. - const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); + const fn_decl = file_ast_decls[decl.src_index]; + assert(node_tags[fn_decl] == .fn_decl); + const block = node_datas[fn_decl].rhs; + const lbrace = tree.firstToken(block); + const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]); const casted_line_off = @intCast(u28, line_delta); const shdr = &self.sections.items[self.debug_line_section_index.?]; diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 15aa86be51..645e17068b 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -904,13 +904,19 @@ pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const M const tracy = trace(@src()); defer tracy.end(); - const tree = decl.container.file_scope.contents.tree; - const file_ast_decls = tree.root_node.decls(); + const tree = decl.container.file_scope.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + + const file_ast_decls = tree.rootDecls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. 
- const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); + const fn_decl = file_ast_decls[decl.src_index]; + assert(node_tags[fn_decl] == .fn_decl); + const block = node_datas[fn_decl].rhs; + const lbrace = tree.firstToken(block); + const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]); const casted_line_off = @intCast(u28, line_delta); const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].Segment; @@ -948,13 +954,19 @@ pub fn initDeclDebugBuffers( try dbg_line_buffer.ensureCapacity(26); const line_off: u28 = blk: { - const tree = decl.container.file_scope.contents.tree; - const file_ast_decls = tree.root_node.decls(); + const tree = decl.container.file_scope.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + + const file_ast_decls = tree.rootDecls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. - const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?; - const block = fn_proto.getBodyNode().?.castTag(.Block).?; - const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start); + const fn_decl = file_ast_decls[decl.src_index]; + assert(node_tags[fn_decl] == .fn_decl); + const block = node_datas[fn_decl].rhs; + const lbrace = tree.firstToken(block); + const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]); break :blk @intCast(u28, line_delta); }; diff --git a/src/zir.zig b/src/zir.zig index d8ac023562..fc68aee216 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -53,6 +53,9 @@ pub const Inst = struct { indexable_ptr_len, /// Function parameter value. These must be first in a function's main block, /// in respective order with the parameters. + /// TODO make this instruction implicit; after we transition to having ZIR + /// instructions be same sized and referenced by index, the first N indexes + /// will implicitly be references to the parameters of the function. arg, /// Type coercion. as, @@ -354,9 +357,8 @@ pub const Inst = struct { .return_void, .ret_ptr, .ret_type, - .unreach_nocheck, - .@"unreachable", - .arg, + .unreachable_unsafe, + .unreachable_safe, .void_value, => NoOp, @@ -451,6 +453,7 @@ pub const Inst = struct { .block_comptime_flat, => Block, + .arg => Arg, .array_type_sentinel => ArrayTypeSentinel, .@"break" => Break, .break_void => BreakVoid, @@ -684,6 +687,18 @@ pub const Inst = struct { kw_args: struct {}, }; + pub const Arg = struct { + pub const base_tag = Tag.arg; + base: Inst, + + positionals: struct { + /// This exists to be passed to the arg TZIR instruction, which + /// needs it for debug info. 
+ name: []const u8, + }, + kw_args: struct {}, + }; + pub const Block = struct { pub const base_tag = Tag.block; base: Inst, -- cgit v1.2.3 From 9712e892656ace8ee26c2b2decfde0f2d116ec54 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 5 Feb 2021 21:05:14 +0100 Subject: stage2 codegen: Add Type argument to genSetReg --- src/codegen.zig | 94 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 46 insertions(+), 48 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index d81ad1faf5..ea08b80092 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -939,7 +939,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Copies a value to a register without tracking the register. The register is not considered /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. - fn copyToTmpRegister(self: *Self, src: usize, mcv: MCValue) !Register { + fn copyToTmpRegister(self: *Self, src: usize, ty: Type, mcv: MCValue) !Register { const reg = self.findUnusedReg() orelse b: { // We'll take over the first register. Move the instruction that was previously // there to a stack allocation. @@ -956,7 +956,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { break :b reg; }; - try self.genSetReg(src, reg, mcv); + try self.genSetReg(src, ty, reg, mcv); return reg; } @@ -983,7 +983,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { break :b reg; }; - try self.genSetReg(reg_owner.src, reg, mcv); + try self.genSetReg(reg_owner.src, reg_owner.ty, reg, mcv); return MCValue{ .register = reg }; } @@ -1351,13 +1351,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Load immediate into register if it doesn't fit // as an operand break :blk Instruction.Operand.fromU32(@intCast(u32, imm)) orelse - Instruction.Operand.reg(try self.copyToTmpRegister(src, op2), Instruction.Operand.Shift.none); + Instruction.Operand.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), op2), Instruction.Operand.Shift.none); }, .register => |reg| Instruction.Operand.reg(reg, Instruction.Operand.Shift.none), .stack_offset, .embedded_in_code, .memory, - => Instruction.Operand.reg(try self.copyToTmpRegister(src, op2), Instruction.Operand.Shift.none), + => Instruction.Operand.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), op2), Instruction.Operand.Shift.none), }; switch (op) { @@ -1443,7 +1443,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (src_mcv) { .immediate => |imm| { if (imm > math.maxInt(u31)) { - src_mcv = MCValue{ .register = try self.copyToTmpRegister(src_inst.src, src_mcv) }; + src_mcv = MCValue{ .register = try self.copyToTmpRegister(src_inst.src, Type.initTag(.u64), src_mcv) }; } }, else => {}, @@ -1474,7 +1474,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |dst_reg| { switch (src_mcv) { .none => unreachable, - .undef => try self.genSetReg(src, dst_reg, .undef), + .undef => try self.genSetReg(src, dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, @@ -1684,7 +1684,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (mc_arg) { .none => continue, .register => |reg| { - try self.genSetReg(arg.src, reg, arg_mcv); + try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); // TODO interact with the register allocator to mark the instruction as moved. 
}, .stack_offset => { @@ -1753,7 +1753,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else unreachable; - try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr }); + try self.genSetReg(inst.base.src, Type.initTag(.usize), .ra, .{ .memory = got_addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); @@ -1826,7 +1826,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_signed => unreachable, .compare_flags_unsigned => unreachable, .register => |reg| { - try self.genSetReg(arg.src, reg, arg_mcv); + try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); // TODO interact with the register allocator to mark the instruction as moved. }, .stack_offset => { @@ -1854,7 +1854,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else unreachable; - try self.genSetReg(inst.base.src, .lr, .{ .memory = got_addr }); + try self.genSetReg(inst.base.src, Type.initTag(.usize), .lr, .{ .memory = got_addr }); // TODO: add Instruction.supportedOn // function for ARM @@ -1889,7 +1889,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_signed => unreachable, .compare_flags_unsigned => unreachable, .register => |reg| { - try self.genSetReg(arg.src, reg, arg_mcv); + try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); // TODO interact with the register allocator to mark the instruction as moved. }, .stack_offset => { @@ -1917,7 +1917,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else unreachable; - try self.genSetReg(inst.base.src, .x30, .{ .memory = got_addr }); + try self.genSetReg(inst.base.src, Type.initTag(.usize), .x30, .{ .memory = got_addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { @@ -1940,7 +1940,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (mc_arg) { .none => continue, .register => |reg| { - try self.genSetReg(arg.src, reg, arg_mcv); + try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); // TODO interact with the register allocator to mark the instruction as moved. 
}, .stack_offset => { @@ -1973,12 +1973,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got_addr = got.addr + func.owner_decl.link.macho.offset_table_index * @sizeOf(u64); switch (arch) { .x86_64 => { - try self.genSetReg(inst.base.src, .rax, .{ .memory = got_addr }); + try self.genSetReg(inst.base.src, Type.initTag(.u32), .rax, .{ .memory = got_addr }); // callq *%rax self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 }); }, .aarch64 => { - try self.genSetReg(inst.base.src, .x30, .{ .memory = got_addr }); + try self.genSetReg(inst.base.src, Type.initTag(.u32), .x30, .{ .memory = got_addr }); // blr x30 writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); }, @@ -2579,7 +2579,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const reg = parseRegName(reg_name) orelse return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); - try self.genSetReg(inst.base.src, reg, arg); + try self.genSetReg(inst.base.src, inst.args[i].ty, reg, arg); } if (mem.eql(u8, inst.asm_source, "svc #0")) { @@ -2609,7 +2609,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const reg = parseRegName(reg_name) orelse return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); - try self.genSetReg(inst.base.src, reg, arg); + try self.genSetReg(inst.base.src, inst.args[i].ty, reg, arg); } if (mem.eql(u8, inst.asm_source, "svc #0")) { @@ -2641,7 +2641,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const reg = parseRegName(reg_name) orelse return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); - try self.genSetReg(inst.base.src, reg, arg); + try self.genSetReg(inst.base.src, inst.args[i].ty, reg, arg); } if (mem.eql(u8, inst.asm_source, "ecall")) { @@ -2671,7 +2671,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const reg = parseRegName(reg_name) orelse return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); const arg = try self.resolveInst(inst.args[i]); - try self.genSetReg(inst.base.src, reg, arg); + try self.genSetReg(inst.base.src, inst.args[i].ty, reg, arg); } if (mem.eql(u8, inst.asm_source, "syscall")) { @@ -2733,7 +2733,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn setRegOrMem(self: *Self, src: usize, ty: Type, loc: MCValue, val: MCValue) !void { switch (loc) { .none => return, - .register => |reg| return self.genSetReg(src, reg, val), + .register => |reg| return self.genSetReg(src, ty, reg, val), .stack_offset => |off| return self.genSetStack(src, ty, off, val), .memory => { return self.fail(src, "TODO implement setRegOrMem for memory", .{}); @@ -2768,7 +2768,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => { - const reg = try self.copyToTmpRegister(src, mcv); + const reg = try self.copyToTmpRegister(src, ty, mcv); return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, .embedded_in_code => |code_offset| { @@ -2782,7 +2782,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 1, 4 => { const offset = if (math.cast(u12, adj_off)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + } else |_| Instruction.Offset.reg(try 
self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); const str = switch (abi_size) { 1 => Instruction.strb, 4 => Instruction.str, @@ -2797,7 +2797,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 2 => { const offset = if (adj_off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off })); writeInt(u32, try self.code.addManyAsArray(4), Instruction.strh(.al, reg, .fp, .{ .offset = offset, @@ -2814,7 +2814,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. - const reg = try self.copyToTmpRegister(src, mcv); + const reg = try self.copyToTmpRegister(src, ty, mcv); return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, }, @@ -2903,7 +2903,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. - const reg = try self.copyToTmpRegister(src, mcv); + const reg = try self.copyToTmpRegister(src, ty, mcv); return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, }, @@ -2931,7 +2931,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => { - const reg = try self.copyToTmpRegister(src, mcv); + const reg = try self.copyToTmpRegister(src, ty, mcv); return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, .embedded_in_code => |code_offset| { @@ -2946,7 +2946,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const offset = if (math.cast(i9, adj_off)) |imm| Instruction.LoadStoreOffset.imm_post_index(-imm) else |_| - Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); const rn: Register = switch (arch) { .aarch64, .aarch64_be => .x29, .aarch64_32 => .w29, @@ -2967,7 +2967,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. - const reg = try self.copyToTmpRegister(src, mcv); + const reg = try self.copyToTmpRegister(src, ty, mcv); return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, }, @@ -2975,7 +2975,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genSetReg(self: *Self, src: usize, reg: Register, mcv: MCValue) InnerError!void { + fn genSetReg(self: *Self, src: usize, ty: Type, reg: Register, mcv: MCValue) InnerError!void { switch (arch) { .arm, .armeb => switch (mcv) { .dead => unreachable, @@ -2986,7 +2986,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaa }); + return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }); }, .compare_flags_unsigned, .compare_flags_signed, @@ -3051,21 +3051,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .memory => |addr| { // The value is in memory at a hard-coded address. 
// If the type is a pointer, it means the pointer address is at this memory location. - try self.genSetReg(src, reg, .{ .immediate = addr }); + try self.genSetReg(src, ty, reg, .{ .immediate = addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32()); }, .stack_offset => |unadjusted_off| { // TODO: maybe addressing from sp instead of fp - // TODO: supply type information to genSetReg as we do to genSetStack - // const abi_size = ty.abiSize(self.target.*); - const abi_size = 4; + const abi_size = ty.abiSize(self.target.*); const adj_off = unadjusted_off + abi_size; switch (abi_size) { 1, 4 => { const offset = if (adj_off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0); + } else Instruction.Offset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); const ldr = switch (abi_size) { 1 => Instruction.ldrb, 4 => Instruction.ldr, @@ -3080,7 +3078,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 2 => { const offset = if (adj_off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off })); writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrh(.al, reg, .fp, .{ .offset = offset, @@ -3102,8 +3100,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // Write the debug undefined value. switch (reg.size()) { - 32 => return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaa }), - 64 => return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + 32 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }), + 64 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, // unexpected register size } }, @@ -3216,7 +3214,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. - try self.genSetReg(src, reg, .{ .immediate = addr }); + try self.genSetReg(src, Type.initTag(.usize), reg, .{ .immediate = addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{ .rn = reg } }).toU32()); } }, @@ -3231,7 +3229,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); + return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { const x = @bitCast(i64, unsigned_x); @@ -3256,7 +3254,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
- try self.genSetReg(src, reg, .{ .immediate = addr }); + try self.genSetReg(src, ty, reg, .{ .immediate = addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ld(reg, 0, reg).toU32()); // LOAD imm=[i12 offset = 0], rs1 = @@ -3275,10 +3273,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // Write the debug undefined value. switch (reg.size()) { - 8 => return self.genSetReg(src, reg, .{ .immediate = 0xaa }), - 16 => return self.genSetReg(src, reg, .{ .immediate = 0xaaaa }), - 32 => return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaa }), - 64 => return self.genSetReg(src, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + 8 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaa }), + 16 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaa }), + 32 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }), + 64 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, } }, @@ -3492,7 +3490,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { assert(id3 != 4 and id3 != 5); // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue. - try self.genSetReg(src, reg, MCValue{ .immediate = x }); + try self.genSetReg(src, ty, reg, MCValue{ .immediate = x }); // Now, the register contains the address of the value to load into it // Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant. @@ -3591,7 +3589,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // This immediate is unsigned. const U = std.meta.Int(.unsigned, ti.bits - @boolToInt(ti.signedness == .signed)); if (imm >= math.maxInt(U)) { - return MCValue{ .register = try self.copyToTmpRegister(inst.src, mcv) }; + return MCValue{ .register = try self.copyToTmpRegister(inst.src, Type.initTag(.usize), mcv) }; } }, else => {}, -- cgit v1.2.3 From 449f4de3825d3448c2aa0cda79c1a567adb08b59 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 24 Feb 2021 21:54:23 -0700 Subject: zig fmt src/ --- src/Module.zig | 9 ++++----- src/astgen.zig | 27 ++++++++++++--------------- src/codegen.zig | 3 +-- src/codegen/c.zig | 3 +-- src/codegen/spirv.zig | 4 +--- src/link/SpirV.zig | 32 ++++++++++++++++---------------- src/main.zig | 3 +-- src/translate_c.zig | 15 ++++++--------- src/translate_c/ast.zig | 6 +++--- 9 files changed, 45 insertions(+), 57 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/Module.zig b/src/Module.zig index 7af4648c79..b0b9d6bc00 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1251,11 +1251,10 @@ fn astgenAndSemaFn( .param_types = param_types, .cc = cc, }); - } else - try astgen.addZirInstTag(mod, &fn_type_scope.base, fn_src, .fn_type, .{ - .return_type = return_type_inst, - .param_types = param_types, - }); + } else try astgen.addZirInstTag(mod, &fn_type_scope.base, fn_src, .fn_type, .{ + .return_type = return_type_inst, + .param_types = param_types, + }); if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { zir.dumpZir(mod.gpa, "fn_type", decl.name, fn_type_scope.instructions.items) catch {}; diff --git a/src/astgen.zig b/src/astgen.zig index 5b27925a5f..1a533a6e96 100644 --- a/src/astgen.zig +++ b/src/astgen.zig @@ -2143,11 +2143,10 @@ fn ifExpr( .src = token_starts[tree.lastToken(else_node)], .result = try expr(mod, sub_scope, block_scope.break_result_loc, else_node), }; - } else - .{ - .src = token_starts[tree.lastToken(if_full.ast.then_expr)], - .result = null, - }; + 
} else .{ + .src = token_starts[tree.lastToken(if_full.ast.then_expr)], + .result = null, + }; return finishThenElseBlock( mod, @@ -2316,11 +2315,10 @@ fn whileExpr( .src = token_starts[tree.lastToken(else_node)], .result = try expr(mod, sub_scope, loop_scope.break_result_loc, else_node), }; - } else - .{ - .src = token_starts[tree.lastToken(while_full.ast.then_expr)], - .result = null, - }; + } else .{ + .src = token_starts[tree.lastToken(while_full.ast.then_expr)], + .result = null, + }; if (loop_scope.label) |some| { if (!some.used) { @@ -2514,11 +2512,10 @@ fn forExpr( .src = token_starts[tree.lastToken(else_node)], .result = try expr(mod, sub_scope, loop_scope.break_result_loc, else_node), }; - } else - .{ - .src = token_starts[tree.lastToken(for_full.ast.then_expr)], - .result = null, - }; + } else .{ + .src = token_starts[tree.lastToken(for_full.ast.then_expr)], + .result = null, + }; if (loop_scope.label) |some| { if (!some.used) { diff --git a/src/codegen.zig b/src/codegen.zig index 779366cc23..69c7789462 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2950,8 +2950,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 4, 8 => { const offset = if (math.cast(i9, adj_off)) |imm| Instruction.LoadStoreOffset.imm_post_index(-imm) - else |_| - Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); + else |_| Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); const rn: Register = switch (arch) { .aarch64, .aarch64_be => .x29, .aarch64_32 => .w29, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d8c81ad0e4..a885b984ac 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -626,8 +626,7 @@ fn genBlock(o: *Object, inst: *Inst.Block) !CValue { const local = try o.allocLocal(inst.base.ty, .Mut); try writer.writeAll(";\n"); break :blk local; - } else - CValue{ .none = {} }; + } else CValue{ .none = {} }; inst.codegen.mcv = @bitCast(@import("../codegen.zig").AnyMCValue, result); try genBody(o, inst.body); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 5a262de836..23fc45616f 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -45,7 +45,5 @@ pub const SPIRVModule = struct { return self.next_id; } - pub fn genDecl(self: SPIRVModule, id: u32, code: *std.ArrayList(u32), decl: *Decl) !void { - - } + pub fn genDecl(self: SPIRVModule, id: u32, code: *std.ArrayList(u32), decl: *Decl) !void {} }; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bde1eae391..7a35752f62 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -1,17 +1,3 @@ -const SpirV = @This(); - -const std = @import("std"); -const Allocator = std.mem.Allocator; -const assert = std.debug.assert; - -const Module = @import("../Module.zig"); -const Compilation = @import("../Compilation.zig"); -const link = @import("../link.zig"); -const codegen = @import("../codegen/spirv.zig"); -const trace = @import("../tracy.zig").trace; -const build_options = @import("build_options"); -const spec = @import("../codegen/spirv/spec.zig"); - //! SPIR-V Spec documentation: https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html //! According to above documentation, a SPIR-V module has the following logical layout: //! Header. @@ -30,6 +16,20 @@ const spec = @import("../codegen/spirv/spec.zig"); //! All function declarations without a body (extern functions presumably). //! All regular functions. 
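
As an aside on the module layout sketched in the doc comment above: the "Header" entry at the top of that list is five fixed words. A minimal sketch of emitting them (the magic and version constants come from the SPIR-V spec; the function itself is illustrative, not this linker's code):

const std = @import("std");

/// Illustrative only: emit the five-word SPIR-V module header that
/// precedes the sections listed in the doc comment above.
fn writeHeader(binary: *std.ArrayList(u32), id_bound: u32) !void {
    try binary.appendSlice(&[_]u32{
        0x07230203, // magic number identifying a SPIR-V module
        0x00010000, // version word 0x00MMmm00; here version 1.0
        0, // generator magic number; 0 means unregistered tool
        id_bound, // bound: every result <id> in the module is < this
        0, // schema word, reserved, always 0
    });
}
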
+const SpirV = @This(); + +const std = @import("std"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; + +const Module = @import("../Module.zig"); +const Compilation = @import("../Compilation.zig"); +const link = @import("../link.zig"); +const codegen = @import("../codegen/spirv.zig"); +const trace = @import("../tracy.zig").trace; +const build_options = @import("build_options"); +const spec = @import("../codegen/spirv/spec.zig"); + pub const FnData = struct { id: ?u32 = null, code: std.ArrayListUnmanaged(u32) = .{}, @@ -199,7 +199,7 @@ fn writeCapabilities(binary: *std.ArrayList(u32), target: std.Target) !void { else => unreachable, // TODO }; - try codegen.writeInstruction(binary, .OpCapability, &[_]u32{ @enumToInt(cap) }); + try codegen.writeInstruction(binary, .OpCapability, &[_]u32{@enumToInt(cap)}); } fn writeMemoryModel(binary: *std.ArrayList(u32), target: std.Target) !void { @@ -221,7 +221,7 @@ fn writeMemoryModel(binary: *std.ArrayList(u32), target: std.Target) !void { }; try codegen.writeInstruction(binary, .OpMemoryModel, &[_]u32{ - @enumToInt(addressing_model), @enumToInt(memory_model) + @enumToInt(addressing_model), @enumToInt(memory_model), }); } diff --git a/src/main.zig b/src/main.zig index 38da3d5a3b..bfac976c5c 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3221,8 +3221,7 @@ pub const ClangArgIterator = struct { self.zig_equivalent = clang_arg.zig_equivalent; break :find_clang_arg; }, - } - else { + } else { fatal("Unknown Clang option: '{s}'", .{arg}); } } diff --git a/src/translate_c.zig b/src/translate_c.zig index f2d2f53050..c6d248ab15 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -2313,8 +2313,7 @@ fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *st const rhs_node = try transExprCoercing(c, scope, rhs, .used); break :blk try Tag.ellipsis3.create(c.arena, .{ .lhs = lhs_node, .rhs = rhs_node }); - } else - try transExprCoercing(c, scope, case_stmt.getLHS(), .used); + } else try transExprCoercing(c, scope, case_stmt.getLHS(), .used); try items.append(expr); sub = case_stmt.getSubStmt(); @@ -2551,8 +2550,7 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip // check if long long first so that signed long long doesn't just become unsigned long long var typeid_node = if (is_longlong) try Tag.identifier.create(c.arena, "usize") else try transQualTypeIntWidthOf(c, qt, false); break :blk try Tag.int_cast.create(c.arena, .{ .lhs = typeid_node, .rhs = try transExpr(c, scope, subscr_expr, .used) }); - } else - try transExpr(c, scope, subscr_expr, .used); + } else try transExpr(c, scope, subscr_expr, .used); const node = try Tag.array_access.create(c.arena, .{ .lhs = container_node, @@ -2752,8 +2750,7 @@ fn transUnaryOperator(c: *Context, scope: *Scope, stmt: *const clang.UnaryOperat } else if (cIsUnsignedInteger(op_expr.getType())) { // use -% x for unsigned integers return Tag.negate_wrap.create(c.arena, try transExpr(c, scope, op_expr, .used)); - } else - return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "C negation with non float non integer", .{}); + } else return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "C negation with non float non integer", .{}); }, .Not => { return Tag.bit_not.create(c.arena, try transExpr(c, scope, op_expr, .used)); @@ -4593,7 +4590,8 @@ fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!N // (type)alignof(x) .Keyword_alignof, // (type)identifier - .Identifier => {}, + .Identifier, + => {}, // 
(type)integer .IntegerLiteral => { saw_integer_literal = true; @@ -5068,8 +5066,7 @@ fn getContainerTypeOf(c: *Context, ref: Node) ?Node { return getContainer(c, field.type); } } - } else - return ty_node; + } else return ty_node; } } return null; diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index 3bc20271cc..dd837add97 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -312,7 +312,7 @@ pub const Node = extern union { => Payload.Value, .@"if" => Payload.If, .@"while" => Payload.While, - .@"switch", .array_init,.switch_prong => Payload.Switch, + .@"switch", .array_init, .switch_prong => Payload.Switch, .break_val => Payload.BreakVal, .call => Payload.Call, .var_decl => Payload.VarDecl, @@ -394,7 +394,8 @@ pub const Node = extern union { some.data else if (case.castTag(.switch_prong)) |some| some.data.cond - else unreachable; + else + unreachable; if (!body.isNoreturn(break_counts)) return false; } @@ -406,7 +407,6 @@ pub const Node = extern union { } return false; } - }; pub const Payload = struct { -- cgit v1.2.3 From 297eabd4accbcae42bfe821078a79e4af06a2dde Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 19 Feb 2021 10:23:36 +0100 Subject: stage2 ARM: Save callee-saved registers Add a new allocated_registers bitmap to keep track of all callee-saved registers allocated during generation of this function. Function(.arm).gen uses this data to generate instructions in the function prologue and epilogue to push and pop these registers respectively. --- src/codegen.zig | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) (limited to 'src/codegen.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 69c7789462..15c19c8e53 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -288,6 +288,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// The key must be canonical register. registers: std.AutoHashMapUnmanaged(Register, *ir.Inst) = .{}, free_registers: FreeRegInt = math.maxInt(FreeRegInt), + /// Tracks all registers allocated in the course of this function + allocated_registers: FreeRegInt = 0, /// Maps offset to what is stored there. 
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, @@ -384,7 +386,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const index = reg.allocIndex() orelse return; const ShiftInt = math.Log2Int(FreeRegInt); const shift = @intCast(ShiftInt, index); - self.free_registers &= ~(@as(FreeRegInt, 1) << shift); + const mask = @as(FreeRegInt, 1) << shift; + self.free_registers &= ~mask; + self.allocated_registers |= mask; } fn markRegFree(self: *Self, reg: Register) void { @@ -402,7 +406,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (free_index >= callee_preserved_regs.len) { return null; } - self.free_registers &= ~(@as(FreeRegInt, 1) << free_index); + const mask = @as(FreeRegInt, 1) << free_index; + self.free_registers &= ~mask; + self.allocated_registers |= mask; const reg = callee_preserved_regs[free_index]; self.registers.putAssumeCapacityNoClobber(reg, inst); log.debug("alloc {} => {*}", .{ reg, inst }); @@ -586,20 +592,34 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // push {fp, lr} // mov fp, sp // sub sp, sp, #reloc - writeInt(u32, try self.code.addManyAsArray(4), Instruction.push(.al, .{ .fp, .lr }).toU32()); - writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .fp, Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none)).toU32()); - const backpatch_reloc = self.code.items.len; - try self.code.resize(backpatch_reloc + 4); + const prologue_reloc = self.code.items.len; + try self.code.resize(prologue_reloc + 12); + writeInt(u32, self.code.items[prologue_reloc + 4 ..][0..4], Instruction.mov(.al, .fp, Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none)).toU32()); try self.dbgSetPrologueEnd(); try self.genBody(self.mod_fn.body); + // Backpatch push callee saved regs + var saved_regs = Instruction.RegisterList{ + .r11 = true, // fp + .r14 = true, // lr + }; + inline for (callee_preserved_regs) |reg, i| { + const ShiftInt = math.Log2Int(FreeRegInt); + const shift = @intCast(ShiftInt, i); + const mask = @as(FreeRegInt, 1) << shift; + if (self.allocated_registers & mask != 0) { + @field(saved_regs, @tagName(reg)) = true; + } + } + writeInt(u32, self.code.items[prologue_reloc..][0..4], Instruction.stmdb(.al, .sp, true, saved_regs).toU32()); + // Backpatch stack offset const stack_end = self.max_end_stack; const aligned_stack_end = mem.alignForward(stack_end, self.stack_align); if (Instruction.Operand.fromU32(@intCast(u32, aligned_stack_end))) |op| { - writeInt(u32, self.code.items[backpatch_reloc..][0..4], Instruction.sub(.al, .sp, .sp, op).toU32()); + writeInt(u32, self.code.items[prologue_reloc + 8 ..][0..4], Instruction.sub(.al, .sp, .sp, op).toU32()); } else { return self.failSymbol("TODO ARM: allow larger stacks", .{}); } @@ -632,10 +652,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + // Epilogue: pop callee saved registers (swap lr with pc in saved_regs) + saved_regs.r14 = false; // lr + saved_regs.r15 = true; // pc + // mov sp, fp // pop {fp, pc} writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .sp, Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none)).toU32()); - writeInt(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32()); + writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldm(.al, .sp, true, saved_regs).toU32()); } else { try self.dbgSetPrologueEnd(); try self.genBody(self.mod_fn.body); -- cgit v1.2.3 From 153c97ac9ec8deafb0777ae424f00695c18e3bd9 Mon Sep 17 00:00:00 2001 From: g-w1 Date: Thu, 31 Dec 2020 17:10:49 -0500 
From 153c97ac9ec8deafb0777ae424f00695c18e3bd9 Mon Sep 17 00:00:00 2001
From: g-w1
Date: Thu, 31 Dec 2020 17:10:49 -0500
Subject: improve stage2 to allow catch at comptime:

* add error_union value tag.
* add analyzeIsErr
* add Value.isError
* add TZIR wrap_errunion_payload and wrap_errunion_err for wrapping from
  T -> E!T and E -> E!T
* add analyzeInstUnwrapErrCode and analyzeInstUnwrapErr
* add analyzeInstEnsureErrPayloadVoid
* add wrapErrorUnion
* add comptime error comparison for tests
* tests!
---
 src/Module.zig       |  66 ++++++++++++++++++++++++++--
 src/codegen.zig      |  62 +++++++++++++++++++++++++++
 src/ir.zig           |  18 ++++++++
 src/value.zig        | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 src/zir.zig          |  12 ++++++
 src/zir_sema.zig     | 107 +++++++++++++++++++++++++++++++++++++++++++---
 test/stage2/test.zig | 108 +++++++++++++++++++++++++++++++++++++++++++++-
 7 files changed, 479 insertions(+), 12 deletions(-)

(limited to 'src/codegen.zig')

diff --git a/src/Module.zig b/src/Module.zig
index b0b9d6bc00..21dc953262 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -2871,7 +2871,15 @@ pub fn analyzeIsNull(
 }
 
 pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) InnerError!*Inst {
-    return self.fail(scope, src, "TODO implement analysis of iserr", .{});
+    const ot = operand.ty.zigTypeTag();
+    if (ot != .ErrorSet and ot != .ErrorUnion) return self.constBool(scope, src, false);
+    if (ot == .ErrorSet) return self.constBool(scope, src, true);
+    assert(ot == .ErrorUnion);
+    if (operand.value()) |err_union| {
+        return self.constBool(scope, src, err_union.getError() != null);
+    }
+    const b = try self.requireRuntimeBlock(scope, src);
+    return self.addUnOp(b, src, Type.initTag(.bool), .is_err, operand);
 }
 
 pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
@@ -3174,6 +3182,52 @@ fn wrapOptional(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*In
     return self.addUnOp(b, inst.src, dest_type, .wrap_optional, inst);
 }
 
+fn wrapErrorUnion(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+    // TODO deal with inferred error sets
+    const err_union = dest_type.castTag(.error_union).?;
+    if (inst.value()) |val| {
+        const to_wrap = if (inst.ty.zigTypeTag() != .ErrorSet) blk: {
+            _ = try self.coerce(scope, err_union.data.payload, inst);
+            break :blk val;
+        } else switch (err_union.data.error_set.tag()) {
+            .anyerror => val,
+            .error_set_single => blk: {
+                const n = err_union.data.error_set.castTag(.error_set_single).?.data;
+                if (!mem.eql(u8, val.castTag(.@"error").?.data.name, n))
+                    return self.fail(scope, inst.src, "expected type '{}', found type '{}'", .{ err_union.data.error_set, inst.ty });
+                break :blk val;
+            },
+            .error_set => blk: {
+                const f = err_union.data.error_set.castTag(.error_set).?.data.typed_value.most_recent.typed_value.val.castTag(.error_set).?.data.fields;
+                if (f.get(val.castTag(.@"error").?.data.name) == null)
+                    return self.fail(scope, inst.src, "expected type '{}', found type '{}'", .{ err_union.data.error_set, inst.ty });
+                break :blk val;
+            },
+            else => unreachable,
+        };
+
+        return self.constInst(scope, inst.src, .{
+            .ty = dest_type,
+            // creating a SubValue for the error_union payload
+            .val = try Value.Tag.error_union.create(
+                scope.arena(),
+                to_wrap,
+            ),
+        });
+    }
+
+    const b = try self.requireRuntimeBlock(scope, inst.src);
+
+    // we are coercing from E to E!T
+    if (inst.ty.zigTypeTag() == .ErrorSet) {
+        var coerced = try self.coerce(scope, err_union.data.error_set, inst);
+        return self.addUnOp(b, inst.src, dest_type, .wrap_errunion_err, coerced);
+    } else {
+        var coerced = try self.coerce(scope, err_union.data.payload, inst);
+        return self.addUnOp(b, inst.src, dest_type, .wrap_errunion_payload, coerced);
+    }
+}
+
 fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
     const int_payload = try scope.arena().create(Type.Payload.Bits);
     int_payload.* = .{
@@ -3240,7 +3294,7 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty
     return chosen.ty;
 }
 
-pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) InnerError!*Inst {
     // If the types are the same, we can return the operand.
     if (dest_type.eql(inst.ty))
         return inst;
@@ -3274,6 +3328,11 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
         }
     }
 
+    // T to E!T or E to E!T
+    if (dest_type.tag() == .error_union) {
+        return try self.wrapErrorUnion(scope, dest_type, inst);
+    }
+
     // Coercions where the source is a single pointer to an array.
     src_array_ptr: {
         if (!inst.ty.isSinglePointer()) break :src_array_ptr;
@@ -3352,7 +3411,7 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
     return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
 }
 
-pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*Inst {
+pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) InnerError!?*Inst {
     const val = inst.value() orelse return null;
     const src_zig_tag = inst.ty.zigTypeTag();
     const dst_zig_tag = dest_type.zigTypeTag();
@@ -3843,6 +3902,7 @@ pub fn dumpInst(self: *Module, scope: *Scope, inst: *Inst) void {
 pub const PanicId = enum {
     unreach,
     unwrap_null,
+    unwrap_errunion,
 };
 
 pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
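
In user-level terms, the new coerce branch above means both directions of
error-union construction now work: a comptime-known operand is folded into a
constant error_union value, while a runtime operand lowers to the new
wrap_errunion_payload or wrap_errunion_err instruction depending on whether
the source is a payload or an error set. A hypothetical snippet (not part of
the commit) exercising the comptime side of both directions:

    comptime {
        const wrapped_payload: anyerror!u32 = 7;        // T -> E!T
        const wrapped_error: anyerror!u32 = error.Oops; // E -> E!T
        if ((wrapped_payload catch unreachable) != 7) unreachable;
        if ((wrapped_error catch 42) != 42) unreachable;
    }
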
diff --git a/src/codegen.zig b/src/codegen.zig
index 15c19c8e53..57fd732b42 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -909,7 +909,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                .unreach => return MCValue{ .unreach = {} },
                .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?),
                .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?),
+               .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?),
+               .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?),
+               .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?),
+               .unwrap_errunion_payload_ptr => return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?),
                .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?),
+               .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?),
+               .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?),
                .varptr => return self.genVarPtr(inst.castTag(.varptr).?),
                .xor => return self.genXor(inst.castTag(.xor).?),
            }
@@ -1170,6 +1176,41 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            }
        }
 
+       fn genUnwrapErrErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+           // No side effects, so if it's unreferenced, do nothing.
+           if (inst.base.isUnused())
+               return MCValue.dead;
+           switch (arch) {
+               else => return self.fail(inst.base.src, "TODO implement unwrap error union error for {}", .{self.target.cpu.arch}),
+           }
+       }
+
+       fn genUnwrapErrPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+           // No side effects, so if it's unreferenced, do nothing.
+           if (inst.base.isUnused())
+               return MCValue.dead;
+           switch (arch) {
+               else => return self.fail(inst.base.src, "TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}),
+           }
+       }
+       // *(E!T) -> E
+       fn genUnwrapErrErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+           // No side effects, so if it's unreferenced, do nothing.
+           if (inst.base.isUnused())
+               return MCValue.dead;
+           switch (arch) {
+               else => return self.fail(inst.base.src, "TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}),
+           }
+       }
+       // *(E!T) -> *T
+       fn genUnwrapErrPayloadPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+           // No side effects, so if it's unreferenced, do nothing.
+           if (inst.base.isUnused())
+               return MCValue.dead;
+           switch (arch) {
+               else => return self.fail(inst.base.src, "TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}),
+           }
+       }
 
        fn genWrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
            const optional_ty = inst.base.ty;
@@ -1186,6 +1227,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            }
        }
 
+       /// T to E!T
+       fn genWrapErrUnionPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+           // No side effects, so if it's unreferenced, do nothing.
+           if (inst.base.isUnused())
+               return MCValue.dead;
+
+           switch (arch) {
+               else => return self.fail(inst.base.src, "TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}),
+           }
+       }
+
+       /// E to E!T
+       fn genWrapErrUnionErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+           // No side effects, so if it's unreferenced, do nothing.
+           if (inst.base.isUnused())
+               return MCValue.dead;
+
+           switch (arch) {
+               else => return self.fail(inst.base.src, "TODO implement wrap errunion error for {}", .{self.target.cpu.arch}),
+           }
+       }
 
        fn genVarPtr(self: *Self, inst: *ir.Inst.VarPtr) !MCValue {
            // No side effects, so if it's unreferenced, do nothing.
            if (inst.base.isUnused())
diff --git a/src/ir.zig b/src/ir.zig
index a0b33fba73..eddc885d14 100644
--- a/src/ir.zig
+++ b/src/ir.zig
@@ -114,6 +114,18 @@ pub const Inst = struct {
        // *?T => *T
        optional_payload_ptr,
        wrap_optional,
+       /// E!T -> T
+       unwrap_errunion_payload,
+       /// E!T -> E
+       unwrap_errunion_err,
+       /// *(E!T) -> *T
+       unwrap_errunion_payload_ptr,
+       /// *(E!T) -> E
+       unwrap_errunion_err_ptr,
+       /// wrap from T to E!T
+       wrap_errunion_payload,
+       /// wrap from E to E!T
+       wrap_errunion_err,
        xor,
        switchbr,
@@ -143,6 +155,12 @@ pub const Inst = struct {
            .optional_payload,
            .optional_payload_ptr,
            .wrap_optional,
+           .unwrap_errunion_payload,
+           .unwrap_errunion_err,
+           .unwrap_errunion_payload_ptr,
+           .unwrap_errunion_err_ptr,
+           .wrap_errunion_payload,
+           .wrap_errunion_err,
            => UnOp,
 
            .add,
diff --git a/src/value.zig b/src/value.zig
index 50298da682..a602d08c06 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -102,6 +102,7 @@ pub const Value = extern union {
        enum_literal,
        error_set,
        @"error",
+       error_union,
        /// This is a special value that tracks a set of types that have been stored
        /// to an inferred allocation. It does not support any of the normal value queries.
        inferred_alloc,
@@ -174,6 +175,7 @@ pub const Value = extern union {
 
            .ref_val,
            .repeated,
+           .error_union,
            => Payload.SubValue,
 
            .bytes,
@@ -388,9 +390,17 @@ pub const Value = extern union {
                return Value{ .ptr_otherwise = &new_payload.base };
            },
            .@"error" => return self.copyPayloadShallow(allocator, Payload.Error),
+           .error_union => {
+               const payload = self.castTag(.error_union).?;
+               const new_payload = try allocator.create(Payload.SubValue);
+               new_payload.* = .{
+                   .base = payload.base,
+                   .data = try payload.data.copy(allocator),
+               };
+               return Value{ .ptr_otherwise = &new_payload.base };
+           },
            .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet),
-
            .inferred_alloc => unreachable,
        }
    }
@@ -510,6 +520,8 @@ pub const Value = extern union {
                return out_stream.writeAll("}");
            },
            .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
+           // TODO to print this it should be error{ Set, Items }!T(val), but we need the type for that
+           .error_union => return out_stream.print("error_union_val({})", .{val.castTag(.error_union).?.data}),
            .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"),
        };
    }
@@ -622,6 +634,7 @@ pub const Value = extern union {
            .float_128,
            .enum_literal,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -692,6 +705,7 @@ pub const Value = extern union {
            .empty_array,
            .enum_literal,
            .error_set,
+           .error_union,
            .@"error",
            .empty_struct_value,
            .inferred_alloc,
@@ -779,6 +793,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -865,6 +880,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -979,6 +995,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -1069,6 +1086,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -1228,6 +1246,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -1305,6 +1324,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -1543,7 +1563,10 @@ pub const Value = extern union {
                hasher.update(payload.name);
                std.hash.autoHash(&hasher, payload.value);
            },
-
+           .error_union => {
+               const payload = self.castTag(.error_union).?.data;
+               std.hash.autoHash(&hasher, payload.hash());
+           },
            .inferred_alloc => unreachable,
        }
        return hasher.final();
@@ -1621,6 +1644,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -1707,6 +1731,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .inferred_alloc,
            => unreachable,
@@ -1810,6 +1835,7 @@ pub const Value = extern union {
            .enum_literal,
            .error_set,
            .@"error",
+           .error_union,
            .empty_struct_value,
            => false,
 
@@ -1820,6 +1846,93 @@ pub const Value = extern union {
        };
    }
 
+   /// Valid for all types. Asserts the value is not undefined and not unreachable.
+   pub fn getError(self: Value) ?[]const u8 {
+       return switch (self.tag()) {
+           .ty,
+           .int_type,
+           .u8_type,
+           .i8_type,
+           .u16_type,
+           .i16_type,
+           .u32_type,
+           .i32_type,
+           .u64_type,
+           .i64_type,
+           .usize_type,
+           .isize_type,
+           .c_short_type,
+           .c_ushort_type,
+           .c_int_type,
+           .c_uint_type,
+           .c_long_type,
+           .c_ulong_type,
+           .c_longlong_type,
+           .c_ulonglong_type,
+           .c_longdouble_type,
+           .f16_type,
+           .f32_type,
+           .f64_type,
+           .f128_type,
+           .c_void_type,
+           .bool_type,
+           .void_type,
+           .type_type,
+           .anyerror_type,
+           .comptime_int_type,
+           .comptime_float_type,
+           .noreturn_type,
+           .null_type,
+           .undefined_type,
+           .fn_noreturn_no_args_type,
+           .fn_void_no_args_type,
+           .fn_naked_noreturn_no_args_type,
+           .fn_ccc_void_no_args_type,
+           .single_const_pointer_to_comptime_int_type,
+           .const_slice_u8_type,
+           .enum_literal_type,
+           .anyframe_type,
+           .zero,
+           .one,
+           .null_value,
+           .empty_array,
+           .bool_true,
+           .bool_false,
+           .function,
+           .extern_fn,
+           .variable,
+           .int_u64,
+           .int_i64,
+           .int_big_positive,
+           .int_big_negative,
+           .ref_val,
+           .decl_ref,
+           .elem_ptr,
+           .bytes,
+           .repeated,
+           .float_16,
+           .float_32,
+           .float_64,
+           .float_128,
+           .void_value,
+           .enum_literal,
+           .error_set,
+           .empty_struct_value,
+           => null,
+
+           .error_union => {
+               const data = self.castTag(.error_union).?.data;
+               return if (data.tag() == .@"error")
+                   data.castTag(.@"error").?.data.name
+               else
+                   null;
+           },
+           .@"error" => self.castTag(.@"error").?.data.name,
+           .undef => unreachable,
+           .unreachable_value => unreachable,
+           .inferred_alloc => unreachable,
+       };
+   }
 
    /// Valid for all types. Asserts the value is not undefined.
    pub fn isFloat(self: Value) bool {
        return switch (self.tag()) {
@@ -1908,6 +2021,7 @@ pub const Value = extern union {
            .void_value,
            .enum_literal,
            .@"error",
+           .error_union,
            .empty_struct_value,
            .null_value,
            => false,
diff --git a/src/zir.zig b/src/zir.zig
index ee0fd3dc3d..42c972e29d 100644
--- a/src/zir.zig
+++ b/src/zir.zig
@@ -1622,6 +1622,12 @@ const DumpTzir = struct {
                .optional_payload,
                .optional_payload_ptr,
                .wrap_optional,
+               .wrap_errunion_payload,
+               .wrap_errunion_err,
+               .unwrap_errunion_payload,
+               .unwrap_errunion_err,
+               .unwrap_errunion_payload_ptr,
+               .unwrap_errunion_err_ptr,
                => {
                    const un_op = inst.cast(ir.Inst.UnOp).?;
                    try dtz.findConst(un_op.operand);
@@ -1733,6 +1739,12 @@ const DumpTzir = struct {
                .optional_payload,
                .optional_payload_ptr,
                .wrap_optional,
+               .wrap_errunion_err,
+               .wrap_errunion_payload,
+               .unwrap_errunion_err,
+               .unwrap_errunion_payload,
+               .unwrap_errunion_payload_ptr,
+               .unwrap_errunion_err_ptr,
                => {
                    const un_op = inst.cast(ir.Inst.UnOp).?;
                    const kinky = try dtz.writeInst(writer, un_op.operand);
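
The semantic analysis below leans on the new Value.getError: an error_union
value wraps another Value, and the union holds an error exactly when that
inner value is an .@"error" payload. A simplified model of the query
(hypothetical stand-in types, not the real extern union):

    const Val = union(enum) {
        int: u64,
        err: []const u8, // stands in for Value.Tag.@"error"; holds the error name
        error_union: *const Val, // stands in for Value.Tag.error_union

        fn getError(self: Val) ?[]const u8 {
            return switch (self) {
                .err => |name| name,
                .error_union => |inner| switch (inner.*) {
                    .err => |name| name, // the union wraps an error
                    else => null, // the union wraps a payload
                },
                else => null,
            };
        }
    };

    test "getError distinguishes wrapped payload from wrapped error" {
        const payload = Val{ .int = 42 };
        const failure = Val{ .err = "Oops" };
        if ((Val{ .error_union = &payload }).getError() != null) unreachable;
        if ((Val{ .error_union = &failure }).getError() == null) unreachable;
    }
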
diff --git a/src/zir_sema.zig b/src/zir_sema.zig
index b20e78d448..f9ff256a35 100644
--- a/src/zir_sema.zig
+++ b/src/zir_sema.zig
@@ -1263,34 +1263,124 @@ fn zirOptionalPayload(
 fn zirErrUnionPayload(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst {
    const tracy = trace(@src());
    defer tracy.end();
-   return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionPayload", .{});
+
+   const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
+   if (operand.ty.zigTypeTag() != .ErrorUnion)
+       return mod.fail(scope, operand.src, "expected error union type, found '{}'", .{operand.ty});
+
+   if (operand.value()) |val| {
+       if (val.getError()) |name| {
+           return mod.fail(scope, unwrap.base.src, "caught unexpected error '{s}'", .{name});
+       }
+       const data = val.castTag(.error_union).?.data;
+       return mod.constInst(scope, unwrap.base.src, .{
+           .ty = operand.ty.castTag(.error_union).?.data.payload,
+           .val = data,
+       });
+   }
+   const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
+   if (safety_check and mod.wantSafety(scope)) {
+       const is_non_err = try mod.addUnOp(b, unwrap.base.src, Type.initTag(.bool), .is_err, operand);
+       try mod.addSafetyCheck(b, is_non_err, .unwrap_errunion);
+   }
+   return mod.addUnOp(b, unwrap.base.src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_payload, operand);
 }
 
 /// Pointer in, pointer out
 fn zirErrUnionPayloadPtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst {
    const tracy = trace(@src());
    defer tracy.end();
-   return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionPayloadPtr", .{});
+
+   const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
+   assert(operand.ty.zigTypeTag() == .Pointer);
+
+   if (operand.ty.elemType().zigTypeTag() != .ErrorUnion)
+       return mod.fail(scope, unwrap.base.src, "expected error union type, found {}", .{operand.ty.elemType()});
+
+   const operand_pointer_ty = try mod.simplePtrType(scope, unwrap.base.src, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One);
+
+   if (operand.value()) |pointer_val| {
+       const val = try pointer_val.pointerDeref(scope.arena());
+       if (val.getError()) |name| {
+           return mod.fail(scope, unwrap.base.src, "caught unexpected error '{s}'", .{name});
+       }
+       const data = val.castTag(.error_union).?.data;
+       // The same Value represents the pointer to the error union and the payload.
+       return mod.constInst(scope, unwrap.base.src, .{
+           .ty = operand_pointer_ty,
+           .val = try Value.Tag.ref_val.create(
+               scope.arena(),
+               data,
+           ),
+       });
+   }
+
+   const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
+   if (safety_check and mod.wantSafety(scope)) {
+       const is_non_err = try mod.addUnOp(b, unwrap.base.src, Type.initTag(.bool), .is_err, operand);
+       try mod.addSafetyCheck(b, is_non_err, .unwrap_errunion);
+   }
+   return mod.addUnOp(b, unwrap.base.src, operand_pointer_ty, .unwrap_errunion_payload_ptr, operand);
 }
 
 /// Value in, value out
 fn zirErrUnionCode(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst {
    const tracy = trace(@src());
    defer tracy.end();
-   return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionCode", .{});
+
+   const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
+   if (operand.ty.zigTypeTag() != .ErrorUnion)
+       return mod.fail(scope, unwrap.base.src, "expected error union type, found '{}'", .{operand.ty});
+
+   if (operand.value()) |val| {
+       assert(val.getError() != null);
+       const data = val.castTag(.error_union).?.data;
+       return mod.constInst(scope, unwrap.base.src, .{
+           .ty = operand.ty.castTag(.error_union).?.data.error_set,
+           .val = data,
+       });
+   }
+
+   const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
+   return mod.addUnOp(b, unwrap.base.src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_err, operand);
 }
 
 /// Pointer in, value out
 fn zirErrUnionCodePtr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst {
    const tracy = trace(@src());
    defer tracy.end();
-   return mod.fail(scope, unwrap.base.src, "TODO implement zir_sema.zirErrUnionCodePtr", .{});
+
+   const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
+   assert(operand.ty.zigTypeTag() == .Pointer);
+
+   if (operand.ty.elemType().zigTypeTag() != .ErrorUnion)
+       return mod.fail(scope, unwrap.base.src, "expected error union type, found {}", .{operand.ty.elemType()});
+
+   if (operand.value()) |pointer_val| {
+       const val = try pointer_val.pointerDeref(scope.arena());
+       assert(val.getError() != null);
+       const data = val.castTag(.error_union).?.data;
+       return mod.constInst(scope, unwrap.base.src, .{
+           .ty = operand.ty.elemType().castTag(.error_union).?.data.error_set,
+           .val = data,
+       });
+   }
+
+   const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
+   return mod.addUnOp(b, unwrap.base.src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_err_ptr, operand);
 }
 
 fn zirEnsureErrPayloadVoid(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp) InnerError!*Inst {
    const tracy = trace(@src());
    defer tracy.end();
-   return mod.fail(scope, unwrap.base.src, "TODO implement zirEnsureErrPayloadVoid", .{});
+
+   const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
+   if (operand.ty.zigTypeTag() != .ErrorUnion)
+       return mod.fail(scope, unwrap.base.src, "expected error union type, found '{}'", .{operand.ty});
+   if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) {
+       return mod.fail(scope, unwrap.base.src, "expression value is ignored", .{});
+   }
+   return mod.constVoid(scope, unwrap.base.src);
 }
 
 fn zirFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
@@ -2106,7 +2196,12 @@ fn zirCmp(
        if (!is_equality_cmp) {
            return mod.fail(scope, inst.base.src, "{s} operator not allowed for errors", .{@tagName(op)});
        }
-       return mod.fail(scope, inst.base.src, "TODO implement equality comparison between errors", .{});
+       if (rhs.value()) |rval| {
+           if (lhs.value()) |lval| {
+               return mod.constBool(scope, inst.base.src, (lval.castTag(.@"error").?.data.value == rval.castTag(.@"error").?.data.value) == (op == .eq));
+           }
+       }
+       return mod.fail(scope, inst.base.src, "TODO implement equality comparison between runtime errors", .{});
    } else if (lhs.ty.isNumeric() and rhs.ty.isNumeric()) {
        // This operation allows any combination of integer and float types, regardless of the
        // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
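
With the zirCmp branch above, == and != between two comptime-known errors are
resolved during analysis by comparing the errors' global integer values. A
hypothetical snippet that now folds entirely at compile time:

    comptime {
        if (error.A != error.A) unreachable;
        if (error.A == error.B) unreachable;
    }
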
diff --git a/test/stage2/test.zig b/test/stage2/test.zig
index a7cbef82a9..9bd5655d22 100644
--- a/test/stage2/test.zig
+++ b/test/stage2/test.zig
@@ -1397,7 +1397,6 @@ pub fn addCases(ctx: *TestContext) !void {
            "",
        );
    }
-
    {
        var case = ctx.exe("passing u0 to function", linux_x64);
        case.addCompareOutput(
@@ -1419,4 +1418,111 @@ pub fn addCases(ctx: *TestContext) !void {
            "",
        );
    }
+   {
+       var case = ctx.exe("catch at comptime", linux_x64);
+       case.addCompareOutput(
+           \\export fn _start() noreturn {
+           \\    const i: anyerror!u64 = 0;
+           \\    const caught = i catch 5;
+           \\    assert(caught == 0);
+           \\    exit();
+           \\}
+           \\fn assert(b: bool) void {
+           \\    if (!b) unreachable;
+           \\}
+           \\fn exit() noreturn {
+           \\    asm volatile ("syscall"
+           \\        :
+           \\        : [number] "{rax}" (231),
+           \\          [arg1] "{rdi}" (0)
+           \\        : "rcx", "r11", "memory"
+           \\    );
+           \\    unreachable;
+           \\}
+       ,
+           "",
+       );
+       case.addCompareOutput(
+           \\export fn _start() noreturn {
+           \\    const i: anyerror!u64 = error.B;
+           \\    const caught = i catch 5;
+           \\    assert(caught == 5);
+           \\    exit();
+           \\}
+           \\fn assert(b: bool) void {
+           \\    if (!b) unreachable;
+           \\}
+           \\fn exit() noreturn {
+           \\    asm volatile ("syscall"
+           \\        :
+           \\        : [number] "{rax}" (231),
+           \\          [arg1] "{rdi}" (0)
+           \\        : "rcx", "r11", "memory"
+           \\    );
+           \\    unreachable;
+           \\}
+       ,
+           "",
+       );
+       case.addCompareOutput(
+           \\export fn _start() noreturn {
+           \\    const a: anyerror!comptime_int = 42;
+           \\    const b: *const comptime_int = &(a catch unreachable);
+           \\    assert(b.* == 42);
+           \\
+           \\    exit();
+           \\}
+           \\fn assert(b: bool) void {
+           \\    if (!b) unreachable; // assertion failure
+           \\}
+           \\fn exit() noreturn {
+           \\    asm volatile ("syscall"
+           \\        :
+           \\        : [number] "{rax}" (231),
+           \\          [arg1] "{rdi}" (0)
+           \\        : "rcx", "r11", "memory"
+           \\    );
+           \\    unreachable;
+           \\}
+       , "");
+       case.addCompareOutput(
+           \\export fn _start() noreturn {
+           \\const a: anyerror!u32 = error.B;
+           \\_ = &(a catch |err| assert(err == error.B));
+           \\exit();
+           \\}
+           \\fn assert(b: bool) void {
+           \\    if (!b) unreachable;
+           \\}
+           \\fn exit() noreturn {
+           \\    asm volatile ("syscall"
+           \\        :
+           \\        : [number] "{rax}" (231),
+           \\          [arg1] "{rdi}" (0)
+           \\        : "rcx", "r11", "memory"
+           \\    );
+           \\    unreachable;
+           \\}
+       , "");
+       case.addCompareOutput(
+           \\export fn _start() noreturn {
+           \\    const a: anyerror!u32 = error.Bar;
+           \\    a catch |err| assert(err == error.Bar);
+           \\
+           \\    exit();
+           \\}
+           \\fn assert(b: bool) void {
+           \\    if (!b) unreachable;
+           \\}
+           \\fn exit() noreturn {
+           \\    asm volatile ("syscall"
+           \\        :
+           \\        : [number] "{rax}" (231),
+           \\          [arg1] "{rdi}" (0)
+           \\        : "rcx", "r11", "memory"
+           \\    );
+           \\    unreachable;
+           \\}
+       , "");
+   }
 }

-- cgit v1.2.3
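
One edge the zirCmp change deliberately leaves open: the comparison only folds
when both operands are comptime-known, so something like the following
(hypothetical code, not part of the test suite) would still land in the
remaining "TODO implement equality comparison between runtime errors" branch:

    fn runtimeCompare() bool {
        var e: anyerror = error.A; // runtime-known because it is a var
        return e == error.A; // not yet supported by stage2 at this commit
    }
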