From b68fa9970b5cf5bb5954da476cc8679512ce489b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 24 Aug 2020 16:43:20 -0700 Subject: stage2 codegen: Rework genCondBr so that the arch-independent logic isn't buried and duplicated. --- src-self-hosted/codegen.zig | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) (limited to 'src-self-hosted/codegen.zig') diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index cb12211206..6415fceecf 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -1540,14 +1540,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genCondBr(self: *Self, inst: *ir.Inst.CondBr) !MCValue { - // TODO Rework this so that the arch-independent logic isn't buried and duplicated. - switch (arch) { - .x86_64 => { + const cond = try self.resolveInst(inst.condition); + + // TODO deal with liveness / deaths condbr's then_entry_deaths and else_entry_deaths + const reloc: Reloc = switch (arch) { + .i386, .x86_64 => reloc: { try self.code.ensureCapacity(self.code.items.len + 6); - const cond = try self.resolveInst(inst.condition); - switch (cond) { - .compare_flags_signed => |cmp_op| { + const opcode: u8 = switch (cond) { + .compare_flags_signed => |cmp_op| blk: { // Here we map to the opposite opcode because the jump is to the false branch. const opcode: u8 = switch (cmp_op) { .gte => 0x8c, @@ -1557,9 +1558,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .lte => 0x8f, .eq => 0x85, }; - return self.genX86CondBr(inst, opcode); + break :blk opcode; }, - .compare_flags_unsigned => |cmp_op| { + .compare_flags_unsigned => |cmp_op| blk: { // Here we map to the opposite opcode because the jump is to the false branch. const opcode: u8 = switch (cmp_op) { .gte => 0x82, @@ -1569,9 +1570,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .lte => 0x87, .eq => 0x85, }; - return self.genX86CondBr(inst, opcode); + break :blk opcode; }, - .register => |reg| { + .register => |reg| blk: { // test reg, 1 // TODO detect al, ax, eax try self.code.ensureCapacity(self.code.items.len + 4); @@ -1583,20 +1584,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { @as(u8, 0xC0) | (0 << 3) | @truncate(u3, reg.id()), 0x01, }); - return self.genX86CondBr(inst, 0x84); + break :blk 0x84; }, else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {}", .{ self.target.cpu.arch, @tagName(cond) }), - } + }; + self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); + const reloc = Reloc{ .rel32 = self.code.items.len }; + self.code.items.len += 4; + break :reloc reloc; }, - else => return self.fail(inst.base.src, "TODO implement condbr for {}", .{self.target.cpu.arch}), - } - } - - fn genX86CondBr(self: *Self, inst: *ir.Inst.CondBr, opcode: u8) !MCValue { - // TODO deal with liveness / deaths condbr's then_entry_deaths and else_entry_deaths - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); - const reloc = Reloc{ .rel32 = self.code.items.len }; - self.code.items.len += 4; + else => return self.fail(inst.base.src, "TODO implement condbr {}", .{ self.target.cpu.arch }), + }; try self.genBody(inst.then_body); try self.performReloc(inst.base.src, reloc); try self.genBody(inst.else_body); -- cgit v1.2.3 From e97157f71c79257ab575598d142f3caa785a2a76 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 24 Aug 2020 23:09:12 -0700 Subject: stage2: codegen for conditional branching * Move branch-local register and stack allocation metadata to the 
function-local struct. Conditional branches clone this data in order to restore it after generating machine code for a branch. Branch-local data is now only the instruction table mapping *ir.Inst to MCValue. * Implement conditional branching - Process operand deaths - Handle register and stack allocation metadata * Avoid storing unreferenced or void typed instructions into the branch-local instruction table. * Fix integer types reporting the wrong value for hasCodeGenBits. * Remove the codegen optimization for eliding length-0 jumps. I need to reexamine how this works because it was causing invalid jumps to be emitted. --- src-self-hosted/codegen.zig | 337 +++++++++++++++++++++++++++++-------------- src-self-hosted/link/Elf.zig | 9 +- src-self-hosted/type.zig | 4 +- 3 files changed, 235 insertions(+), 115 deletions(-) (limited to 'src-self-hosted/codegen.zig') diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 6415fceecf..298873e618 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -273,8 +273,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// across each runtime branch upon joining. branch_stack: *std.ArrayList(Branch), + /// The key must be canonical register. + registers: std.AutoHashMapUnmanaged(Register, *ir.Inst) = .{}, + free_registers: FreeRegInt = math.maxInt(FreeRegInt), + /// Maps offset to what is stored there. + stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, + + /// Offset from the stack base, representing the end of the stack frame. + max_end_stack: u32 = 0, + /// Represents the current end stack offset. If there is no existing slot + /// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. + next_stack_offset: u32 = 0, + const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. + /// TODO Look into deleting this tag and using `dead` instead, since every use + /// of MCValue.none should be instead looking at the type and noticing it is 0 bits. none, /// Control flow will not allow this value to be observed. unreach, @@ -346,71 +360,55 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const Branch = struct { inst_table: std.AutoHashMapUnmanaged(*ir.Inst, MCValue) = .{}, - /// The key must be canonical register. - registers: std.AutoHashMapUnmanaged(Register, RegisterAllocation) = .{}, - free_registers: FreeRegInt = math.maxInt(FreeRegInt), - - /// Maps offset to what is stored there. - stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, - /// Offset from the stack base, representing the end of the stack frame. - max_end_stack: u32 = 0, - /// Represents the current end stack offset. If there is no existing slot - /// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. - next_stack_offset: u32 = 0, - - fn markRegUsed(self: *Branch, reg: Register) void { - if (FreeRegInt == u0) return; - const index = reg.allocIndex() orelse return; - const ShiftInt = math.Log2Int(FreeRegInt); - const shift = @intCast(ShiftInt, index); - self.free_registers &= ~(@as(FreeRegInt, 1) << shift); - } - - fn markRegFree(self: *Branch, reg: Register) void { - if (FreeRegInt == u0) return; - const index = reg.allocIndex() orelse return; - const ShiftInt = math.Log2Int(FreeRegInt); - const shift = @intCast(ShiftInt, index); - self.free_registers |= @as(FreeRegInt, 1) << shift; - } - - /// Before calling, must ensureCapacity + 1 on branch.registers. - /// Returns `null` if all registers are allocated. 
- fn allocReg(self: *Branch, inst: *ir.Inst) ?Register { - const free_index = @ctz(FreeRegInt, self.free_registers); - if (free_index >= callee_preserved_regs.len) { - return null; - } - self.free_registers &= ~(@as(FreeRegInt, 1) << free_index); - const reg = callee_preserved_regs[free_index]; - self.registers.putAssumeCapacityNoClobber(reg, .{ .inst = inst }); - log.debug("alloc {} => {*}", .{reg, inst}); - return reg; - } - - /// Does not track the register. - fn findUnusedReg(self: *Branch) ?Register { - const free_index = @ctz(FreeRegInt, self.free_registers); - if (free_index >= callee_preserved_regs.len) { - return null; - } - return callee_preserved_regs[free_index]; - } fn deinit(self: *Branch, gpa: *Allocator) void { self.inst_table.deinit(gpa); - self.registers.deinit(gpa); - self.stack.deinit(gpa); self.* = undefined; } }; - const RegisterAllocation = struct { - inst: *ir.Inst, - }; + fn markRegUsed(self: *Self, reg: Register) void { + if (FreeRegInt == u0) return; + const index = reg.allocIndex() orelse return; + const ShiftInt = math.Log2Int(FreeRegInt); + const shift = @intCast(ShiftInt, index); + self.free_registers &= ~(@as(FreeRegInt, 1) << shift); + } + + fn markRegFree(self: *Self, reg: Register) void { + if (FreeRegInt == u0) return; + const index = reg.allocIndex() orelse return; + const ShiftInt = math.Log2Int(FreeRegInt); + const shift = @intCast(ShiftInt, index); + self.free_registers |= @as(FreeRegInt, 1) << shift; + } + + /// Before calling, must ensureCapacity + 1 on self.registers. + /// Returns `null` if all registers are allocated. + fn allocReg(self: *Self, inst: *ir.Inst) ?Register { + const free_index = @ctz(FreeRegInt, self.free_registers); + if (free_index >= callee_preserved_regs.len) { + return null; + } + self.free_registers &= ~(@as(FreeRegInt, 1) << free_index); + const reg = callee_preserved_regs[free_index]; + self.registers.putAssumeCapacityNoClobber(reg, inst); + log.debug("alloc {} => {*}", .{reg, inst}); + return reg; + } + + /// Does not track the register. + fn findUnusedReg(self: *Self) ?Register { + const free_index = @ctz(FreeRegInt, self.free_registers); + if (free_index >= callee_preserved_regs.len) { + return null; + } + return callee_preserved_regs[free_index]; + } const StackAllocation = struct { inst: *ir.Inst, + /// TODO do we need size? 
should be determined by inst.ty.abiSize() size: u32, }; @@ -435,8 +433,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { branch_stack.items[0].deinit(bin_file.allocator); branch_stack.deinit(); } - const branch = try branch_stack.addOne(); - branch.* = .{}; + try branch_stack.append(.{}); const src_data: struct {lbrace_src: usize, rbrace_src: usize, source: []const u8} = blk: { if (module_fn.owner_decl.scope.cast(Module.Scope.File)) |scope_file| { @@ -476,6 +473,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .rbrace_src = src_data.rbrace_src, .source = src_data.source, }; + defer function.registers.deinit(bin_file.allocator); + defer function.stack.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); var call_info = function.resolveCallingConventionValues(src, fn_type) catch |err| switch (err) { @@ -487,7 +486,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { function.args = call_info.args; function.ret_mcv = call_info.return_value; function.stack_align = call_info.stack_align; - branch.max_end_stack = call_info.stack_byte_count; + function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, @@ -523,7 +522,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); try self.genBody(self.mod_fn.analysis.success); - const stack_end = self.branch_stack.items[0].max_end_stack; + const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) return self.fail(self.src, "too much stack used in call parameters", .{}); const aligned_stack_end = mem.alignForward(stack_end, self.stack_align); @@ -580,13 +579,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genBody(self: *Self, body: ir.Body) InnerError!void { - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - const inst_table = &branch.inst_table; for (body.instructions) |inst| { + try self.ensureProcessDeathCapacity(@popCount(@TypeOf(inst.deaths), inst.deaths)); + const mcv = try self.genFuncInst(inst); - log.debug("{*} => {}", .{inst, mcv}); - // TODO don't put void or dead things in here - try inst_table.putNoClobber(self.gpa, inst, mcv); + if (!inst.isUnused()) { + log.debug("{*} => {}", .{inst, mcv}); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + try branch.inst_table.putNoClobber(self.gpa, inst, mcv); + } var i: ir.Inst.DeathsBitIndex = 0; while (inst.getOperand(i)) |operand| : (i += 1) { @@ -628,21 +629,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.dbg_line.appendAssumeCapacity(DW.LNS_copy); } + /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: *ir.Inst) void { + if (inst.tag == .constant) return; // Constants are immortal. 
+ const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - const entry = branch.inst_table.getEntry(inst) orelse return; - const prev_value = entry.value; - entry.value = .dead; + branch.inst_table.putAssumeCapacity(inst, .dead); switch (prev_value) { .register => |reg| { const canon_reg = toCanonicalReg(reg); - _ = branch.registers.remove(canon_reg); - branch.markRegFree(canon_reg); + _ = self.registers.remove(canon_reg); + self.markRegFree(canon_reg); }, else => {}, // TODO process stack allocation death } } + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { + const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; + try table.ensureCapacity(self.gpa, table.items().len + additional_count); + } + /// Adds a Type to the .debug_info at the current position. The bytes will be populated later, /// after codegen for this symbol is done. fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { @@ -705,13 +712,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn allocMem(self: *Self, inst: *ir.Inst, abi_size: u32, abi_align: u32) !u32 { if (abi_align > self.stack_align) self.stack_align = abi_align; - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, branch.next_stack_offset, abi_align); - branch.next_stack_offset = offset + abi_size; - if (branch.next_stack_offset > branch.max_end_stack) - branch.max_end_stack = branch.next_stack_offset; - try branch.stack.putNoClobber(self.gpa, offset, .{ + const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); + self.next_stack_offset = offset + abi_size; + if (self.next_stack_offset > self.max_end_stack) + self.max_end_stack = self.next_stack_offset; + try self.stack.putNoClobber(self.gpa, offset, .{ .inst = inst, .size = abi_size, }); @@ -737,15 +743,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) self.stack_align = abi_align; - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. const ptr_bits = arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - try branch.registers.ensureCapacity(self.gpa, branch.registers.items().len + 1); - if (branch.allocReg(inst)) |reg| { + try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1); + if (self.allocReg(inst)) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } @@ -758,20 +763,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, src: usize, mcv: MCValue) !Register { - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - - const reg = branch.findUnusedReg() orelse b: { + const reg = self.findUnusedReg() orelse b: { // We'll take over the first register. Move the instruction that was previously // there to a stack allocation. 
const reg = callee_preserved_regs[0]; - const regs_entry = branch.registers.remove(reg).?; - const spilled_inst = regs_entry.value.inst; + const regs_entry = self.registers.remove(reg).?; + const spilled_inst = regs_entry.value; const stack_mcv = try self.allocRegOrMem(spilled_inst, false); - const inst_entry = branch.inst_table.getEntry(spilled_inst).?; - const reg_mcv = inst_entry.value; + const reg_mcv = self.getResolvedInstValue(spilled_inst); assert(reg == toCanonicalReg(reg_mcv.register)); - inst_entry.value = stack_mcv; + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + try branch.inst_table.put(self.gpa, spilled_inst, stack_mcv); try self.genSetStack(src, spilled_inst.ty, stack_mcv.stack_offset, reg_mcv); break :b reg; @@ -784,22 +787,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue { - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - try branch.registers.ensureCapacity(self.gpa, branch.registers.items().len + 1); + try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1); - const reg = branch.allocReg(reg_owner) orelse b: { + const reg = self.allocReg(reg_owner) orelse b: { // We'll take over the first register. Move the instruction that was previously // there to a stack allocation. const reg = callee_preserved_regs[0]; - const regs_entry = branch.registers.getEntry(reg).?; - const spilled_inst = regs_entry.value.inst; - regs_entry.value = .{ .inst = reg_owner }; + const regs_entry = self.registers.getEntry(reg).?; + const spilled_inst = regs_entry.value; + regs_entry.value = reg_owner; const stack_mcv = try self.allocRegOrMem(spilled_inst, false); - const inst_entry = branch.inst_table.getEntry(spilled_inst).?; - const reg_mcv = inst_entry.value; + const reg_mcv = self.getResolvedInstValue(spilled_inst); assert(reg == toCanonicalReg(reg_mcv.register)); - inst_entry.value = stack_mcv; + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + try branch.inst_table.put(self.gpa, spilled_inst, stack_mcv); try self.genSetStack(reg_owner.src, spilled_inst.ty, stack_mcv.stack_offset, reg_mcv); break :b reg; @@ -934,9 +936,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |reg| { // If it's in the registers table, need to associate the register with the // new instruction. 
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - if (branch.registers.getEntry(toCanonicalReg(reg))) |entry| { - entry.value = .{ .inst = inst }; + if (self.registers.getEntry(toCanonicalReg(reg))) |entry| { + entry.value = inst; } log.debug("reusing {} => {*}", .{reg, inst}); }, @@ -1231,8 +1232,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.base.isUnused()) return MCValue.dead; - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - try branch.registers.ensureCapacity(self.gpa, branch.registers.items().len + 1); + try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1); const result = self.args[self.arg_index]; self.arg_index += 1; @@ -1240,8 +1240,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const name_with_null = inst.name[0..mem.lenZ(inst.name) + 1]; switch (result) { .register => |reg| { - branch.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), .{ .inst = &inst.base }); - branch.markRegUsed(reg); + self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base); + self.markRegUsed(reg); try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len); self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter); @@ -1536,13 +1536,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genDbgStmt(self: *Self, inst: *ir.Inst.NoOp) !MCValue { try self.dbgAdvancePCAndLine(inst.base.src); - return MCValue.none; + assert(inst.base.isUnused()); + return MCValue.dead; } fn genCondBr(self: *Self, inst: *ir.Inst.CondBr) !MCValue { const cond = try self.resolveInst(inst.condition); - // TODO deal with liveness / deaths condbr's then_entry_deaths and else_entry_deaths const reloc: Reloc = switch (arch) { .i386, .x86_64 => reloc: { try self.code.ensureCapacity(self.code.items.len + 6); @@ -1595,9 +1595,117 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail(inst.base.src, "TODO implement condbr {}", .{ self.target.cpu.arch }), }; + + // Capture the state of register and stack allocation state so that we can revert to it. + const parent_next_stack_offset = self.next_stack_offset; + const parent_free_registers = self.free_registers; + var parent_stack = try self.stack.clone(self.gpa); + defer parent_stack.deinit(self.gpa); + var parent_registers = try self.registers.clone(self.gpa); + defer parent_registers.deinit(self.gpa); + + try self.branch_stack.append(.{}); + + const then_deaths = inst.thenDeaths(); + try self.ensureProcessDeathCapacity(then_deaths.len); + for (then_deaths) |operand| { + self.processDeath(operand); + } try self.genBody(inst.then_body); + + // Revert to the previous register and stack allocation state. 
+ + var saved_then_branch = self.branch_stack.pop(); + defer saved_then_branch.deinit(self.gpa); + + self.registers.deinit(self.gpa); + self.registers = parent_registers; + parent_registers = .{}; + + self.stack.deinit(self.gpa); + self.stack = parent_stack; + parent_stack = .{}; + + self.next_stack_offset = parent_next_stack_offset; + self.free_registers = parent_free_registers; + try self.performReloc(inst.base.src, reloc); + const else_branch = self.branch_stack.addOneAssumeCapacity(); + else_branch.* = .{}; + + const else_deaths = inst.elseDeaths(); + try self.ensureProcessDeathCapacity(else_deaths.len); + for (else_deaths) |operand| { + self.processDeath(operand); + } try self.genBody(inst.else_body); + + // At this point, each branch will possibly have conflicting values for where + // each instruction is stored. They agree, however, on which instructions are alive/dead. + // We use the first ("then") branch as canonical, and here emit + // instructions into the second ("else") branch to make it conform. + // We continue respect the data structure semantic guarantees of the else_branch so + // that we can use all the code emitting abstractions. This is why at the bottom we + // assert that parent_branch.free_registers equals the saved_then_branch.free_registers + // rather than assigning it. + const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2]; + try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.items().len + + else_branch.inst_table.items().len); + for (else_branch.inst_table.items()) |else_entry| { + const canon_mcv = if (saved_then_branch.inst_table.remove(else_entry.key)) |then_entry| blk: { + // The instruction's MCValue is overridden in both branches. + parent_branch.inst_table.putAssumeCapacity(else_entry.key, then_entry.value); + if (else_entry.value == .dead) { + assert(then_entry.value == .dead); + continue; + } + break :blk then_entry.value; + } else blk: { + if (else_entry.value == .dead) + continue; + // The instruction is only overridden in the else branch. + var i: usize = self.branch_stack.items.len - 2; + while (true) { + i -= 1; + if (self.branch_stack.items[i].inst_table.get(else_entry.key)) |mcv| { + assert(mcv != .dead); + break :blk mcv; + } + } + }; + log.debug("consolidating else_entry {*} {}=>{}", .{else_entry.key, else_entry.value, canon_mcv}); + // TODO make sure the destination stack offset / register does not already have something + // going on there. + try self.setRegOrMem(inst.base.src, else_entry.key.ty, canon_mcv, else_entry.value); + // TODO track the new register / stack allocation + } + try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.items().len + + saved_then_branch.inst_table.items().len); + for (saved_then_branch.inst_table.items()) |then_entry| { + // We already deleted the items from this table that matched the else_branch. + // So these are all instructions that are only overridden in the then branch. 
+ parent_branch.inst_table.putAssumeCapacity(then_entry.key, then_entry.value); + if (then_entry.value == .dead) + continue; + const parent_mcv = blk: { + var i: usize = self.branch_stack.items.len - 2; + while (true) { + i -= 1; + if (self.branch_stack.items[i].inst_table.get(then_entry.key)) |mcv| { + assert(mcv != .dead); + break :blk mcv; + } + } + }; + log.debug("consolidating then_entry {*} {}=>{}", .{then_entry.key, parent_mcv, then_entry.value}); + // TODO make sure the destination stack offset / register does not already have something + // going on there. + try self.setRegOrMem(inst.base.src, then_entry.key.ty, parent_mcv, then_entry.value); + // TODO track the new register / stack allocation + } + + self.branch_stack.pop().deinit(self.gpa); + return MCValue.unreach; } @@ -1671,11 +1779,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (reloc) { .rel32 => |pos| { const amt = self.code.items.len - (pos + 4); - // If it wouldn't jump at all, elide it. - if (amt == 0) { - self.code.items.len -= 5; - return; - } + // Here it would be tempting to implement testing for amt == 0 and then elide the + // jump. However, that will cause a problem because other jumps may assume that they + // can jump to this code. Or maybe I didn't understand something when I was debugging. + // It could be worth another look. Anyway, that's why that isn't done here. Probably the + // best place to elide jumps will be in semantic analysis, by inlining blocks that only + // only have 1 break instruction. const s32_amt = math.cast(i32, amt) catch return self.fail(src, "unable to perform relocation: jump too far", .{}); mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt); @@ -2280,8 +2389,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn resolveInst(self: *Self, inst: *ir.Inst) !MCValue { + // If the type has no codegen bits, no need to store it. + if (!inst.ty.hasCodeGenBits()) + return MCValue.none; + // Constants have static lifetimes, so they are always memoized in the outer most table. - if (inst.cast(ir.Inst.Constant)) |const_inst| { + if (inst.castTag(.constant)) |const_inst| { const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst); if (!gop.found_existing) { @@ -2290,6 +2403,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return gop.entry.value; } + return self.getResolvedInstValue(inst); + } + + fn getResolvedInstValue(self: *Self, inst: *ir.Inst) MCValue { // Treat each stack item as a "layer" on top of the previous one. var i: usize = self.branch_stack.items.len; while (true) { diff --git a/src-self-hosted/link/Elf.zig b/src-self-hosted/link/Elf.zig index 1d18344cbb..b67de55dd5 100644 --- a/src-self-hosted/link/Elf.zig +++ b/src-self-hosted/link/Elf.zig @@ -1640,9 +1640,12 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { else => false, }; if (is_fn) { - //if (mem.eql(u8, mem.spanZ(decl.name), "add")) { - // typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*); - //} + { + //if (mem.eql(u8, mem.spanZ(decl.name), "add")) { + //} + std.debug.print("\n{}\n", .{decl.name}); + typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*); + } // For functions we need to add a prologue to the debug line program. 
try dbg_line_buffer.ensureCapacity(26); diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index eb8fa2acd7..82079aa9f7 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -771,8 +771,8 @@ pub const Type = extern union { .array => self.elemType().hasCodeGenBits() and self.arrayLen() != 0, .array_u8 => self.arrayLen() != 0, .array_sentinel, .single_const_pointer, .single_mut_pointer, .many_const_pointer, .many_mut_pointer, .c_const_pointer, .c_mut_pointer, .const_slice, .mut_slice, .pointer => self.elemType().hasCodeGenBits(), - .int_signed => self.cast(Payload.IntSigned).?.bits == 0, - .int_unsigned => self.cast(Payload.IntUnsigned).?.bits == 0, + .int_signed => self.cast(Payload.IntSigned).?.bits != 0, + .int_unsigned => self.cast(Payload.IntUnsigned).?.bits != 0, .error_union => { const payload = self.cast(Payload.ErrorUnion).?; -- cgit v1.2.3 From 0c5faa61aebca4215683d233dd52bf3a7a5d1db6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 26 Aug 2020 01:00:04 -0700 Subject: stage2: codegen: fix reuseOperand not doing death bookkeeping --- src-self-hosted/codegen.zig | 7 ++++- src-self-hosted/zir.zig | 12 ++++++++- test/stage2/test.zig | 62 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 2 deletions(-) (limited to 'src-self-hosted/codegen.zig') diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 298873e618..b282c2011b 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -632,6 +632,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: *ir.Inst) void { if (inst.tag == .constant) return; // Constants are immortal. + // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; branch.inst_table.putAssumeCapacity(inst, .dead); @@ -951,6 +952,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Prevent the operand deaths processing code from deallocating it. inst.clearOperandDeath(op_index); + // That makes us responsible for doing the rest of the stuff that processDeath would have done. + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacity(inst.getOperand(op_index).?, .dead); + return true; } @@ -1666,7 +1671,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // The instruction is only overridden in the else branch. var i: usize = self.branch_stack.items.len - 2; while (true) { - i -= 1; + i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead? if (self.branch_stack.items[i].inst_table.get(else_entry.key)) |mcv| { assert(mcv != .dead); break :blk mcv; diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig index 3557c88f4e..c552f28553 100644 --- a/src-self-hosted/zir.zig +++ b/src-self-hosted/zir.zig @@ -954,6 +954,7 @@ pub const Module = struct { pub const MetaData = struct { deaths: ir.Inst.DeathsInt, + addr: usize, }; pub const BodyMetaData = struct { @@ -1152,6 +1153,12 @@ const Writer = struct { try self.writeInstToStream(stream, inst); if (self.module.metadata.get(inst)) |metadata| { try stream.print(" ; deaths=0b{b}", .{metadata.deaths}); + // This is conditionally compiled in because addresses mess up the tests due + // to Address Space Layout Randomization. 
It's super useful when debugging + // codegen.zig though. + if (!std.builtin.is_test) { + try stream.print(" 0x{x}", .{metadata.addr}); + } } self.indent -= 2; try stream.writeByte('\n'); @@ -2417,7 +2424,10 @@ const EmitZIR = struct { .varptr => @panic("TODO"), }; - try self.metadata.put(new_inst, .{ .deaths = inst.deaths }); + try self.metadata.put(new_inst, .{ + .deaths = inst.deaths, + .addr = @ptrToInt(inst), + }); try instructions.append(new_inst); try inst_table.put(inst, new_inst); } diff --git a/test/stage2/test.zig b/test/stage2/test.zig index fee8886ddb..1a0d801ac3 100644 --- a/test/stage2/test.zig +++ b/test/stage2/test.zig @@ -694,6 +694,68 @@ pub fn addCases(ctx: *TestContext) !void { "", ); + // Reusing the registers of dead operands playing nicely with conditional branching. + case.addCompareOutput( + \\export fn _start() noreturn { + \\ assert(add(3, 4) == 791); + \\ assert(add(4, 3) == 79); + \\ + \\ exit(); + \\} + \\ + \\fn add(a: u32, b: u32) u32 { + \\ const x: u32 = if (a < b) blk: { + \\ const c = a + b; // 7 + \\ const d = a + c; // 10 + \\ const e = d + b; // 14 + \\ const f = d + e; // 24 + \\ const g = e + f; // 38 + \\ const h = f + g; // 62 + \\ const i = g + h; // 100 + \\ const j = i + d; // 110 + \\ const k = i + j; // 210 + \\ const l = k + c; // 217 + \\ const m = l + d; // 227 + \\ const n = m + e; // 241 + \\ const o = n + f; // 265 + \\ const p = o + g; // 303 + \\ const q = p + h; // 365 + \\ const r = q + i; // 465 + \\ const s = r + j; // 575 + \\ const t = s + k; // 785 + \\ break :blk t; + \\ } else blk: { + \\ const t = b + b + a; // 10 + \\ const c = a + t; // 14 + \\ const d = c + t; // 24 + \\ const e = d + t; // 34 + \\ const f = e + t; // 44 + \\ const g = f + t; // 54 + \\ const h = c + g; // 68 + \\ break :blk h + b; // 71 + \\ }; + \\ const y = x + a; // 788, 75 + \\ const z = y + a; // 791, 79 + \\ return z; + \\} + \\ + \\pub fn assert(ok: bool) void { + \\ if (!ok) unreachable; // assertion failure + \\} + \\ + \\fn exit() noreturn { + \\ asm volatile ("syscall" + \\ : + \\ : [number] "{rax}" (231), + \\ [arg1] "{rdi}" (0) + \\ : "rcx", "r11", "memory" + \\ ); + \\ unreachable; + \\} + , + "", + ); + // Character literals and multiline strings. 
case.addCompareOutput( \\export fn _start() noreturn { -- cgit v1.2.3 From cc26cb9b2366723a8149e9f79e8252936cb69b73 Mon Sep 17 00:00:00 2001 From: Vexu Date: Wed, 26 Aug 2020 21:07:56 +0300 Subject: stage2: codegen needed for basic for loop --- src-self-hosted/astgen.zig | 12 +++++---- src-self-hosted/codegen.zig | 32 +++++++++++++++++++++--- src-self-hosted/zir_sema.zig | 2 +- test/stage2/test.zig | 59 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 95 insertions(+), 10 deletions(-) (limited to 'src-self-hosted/codegen.zig') diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig index 737af24268..cb8191fcee 100644 --- a/src-self-hosted/astgen.zig +++ b/src-self-hosted/astgen.zig @@ -802,7 +802,7 @@ fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) const err_name = tree.tokenSlice(payload.castTag(.Payload).?.error_symbol.firstToken()); if (mem.eql(u8, err_name, "_")) break :blk &err_scope.base; - + const unwrapped_err_ptr = try addZIRUnOp(mod, &err_scope.base, src, .unwrap_err_code, err_union_ptr); err_val_scope = .{ .parent = &err_scope.base, @@ -1374,7 +1374,8 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For) .ty = Type.initTag(.usize), .val = Value.initTag(.one), }); - const index_plus_one = try addZIRBinOp(mod, &loop_scope.base, for_src, .add, index, one); + const index_2 = try addZIRUnOp(mod, &loop_scope.base, cond_src, .deref, index_ptr); + const index_plus_one = try addZIRBinOp(mod, &loop_scope.base, for_src, .add, index_2, one); _ = try addZIRBinOp(mod, &loop_scope.base, for_src, .store, index_ptr, index_plus_one); // looping stuff @@ -1382,7 +1383,7 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For) .instructions = try for_scope.arena.dupe(*zir.Inst, loop_scope.instructions.items), }); const for_block = try addZIRInstBlock(mod, scope, for_src, .{ - .instructions = try scope.arena().dupe(*zir.Inst, for_scope.instructions.items), + .instructions = try for_scope.arena.dupe(*zir.Inst, for_scope.instructions.items), }); // while body @@ -1404,7 +1405,7 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For) .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = for_block }, }; - var index_scope: Scope.LocalVal = undefined; + var index_scope: Scope.LocalPtr = undefined; const then_sub_scope = blk: { const payload = for_node.payload.castTag(.PointerIndexPayload).?; const is_ptr = payload.ptr_token != null; @@ -1422,11 +1423,12 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For) if (mem.eql(u8, index_name, "_")) { break :blk &then_scope.base; } + // TODO make this const without an extra copy? index_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = index_name, - .inst = index, + .ptr = index_ptr, }; break :blk &index_scope.base; }; diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index b282c2011b..7a24af976d 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -829,6 +829,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // No side effects, so if it's unreferenced, do nothing. 
if (inst.base.isUnused()) return MCValue.dead; + + const operand = try self.resolveInst(inst.operand); + const info_a = inst.operand.ty.intInfo(self.target.*); + const info_b = inst.base.ty.intInfo(self.target.*); + if (info_a.signed != info_b.signed) + return self.fail(inst.base.src, "TODO gen intcast sign safety in semantic analysis", .{}); + + if (info_a.bits == info_b.bits) + return operand; + switch (arch) { else => return self.fail(inst.base.src, "TODO implement intCast for {}", .{self.target.cpu.arch}), } @@ -2039,15 +2049,29 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), x); }, 8 => { - return self.fail(src, "TODO implement set abi_size=8 stack variable with immediate", .{}); + // We have a positive stack offset value but we want a twos complement negative + // offset from rbp, which is at the top of the stack frame. + const negative_offset = @intCast(i8, -@intCast(i32, adj_off)); + const twos_comp = @bitCast(u8, negative_offset); + + // 64 bit write to memory would take two mov's anyways so we + // insted just use two 32 bit writes to avoid register allocation + try self.code.ensureCapacity(self.code.items.len + 14); + var buf: [8]u8 = undefined; + mem.writeIntLittle(u64, &buf, x_big); + + // mov DWORD PTR [rbp+offset+4], immediate + self.code.appendSliceAssumeCapacity(&[_]u8{ 0xc7, 0x45, twos_comp + 4}); + self.code.appendSliceAssumeCapacity(buf[4..8]); + + // mov DWORD PTR [rbp+offset], immediate + self.code.appendSliceAssumeCapacity(&[_]u8{ 0xc7, 0x45, twos_comp }); + self.code.appendSliceAssumeCapacity(buf[0..4]); }, else => { return self.fail(src, "TODO implement set abi_size=large stack variable with immediate", .{}); }, } - if (x_big <= math.maxInt(u32)) {} else { - return self.fail(src, "TODO implement set stack variable with large immediate", .{}); - } }, .embedded_in_code => |code_offset| { return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig index 056d6c2faa..2ac14f8bb4 100644 --- a/src-self-hosted/zir_sema.zig +++ b/src-self-hosted/zir_sema.zig @@ -112,7 +112,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! 
.condbr => return analyzeInstCondBr(mod, scope, old_inst.castTag(.condbr).?), .isnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnull).?, true), .isnonnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnonnull).?, false), - .iserr => return analyzeInstIsErr(mod, scope, old_inst.castTag(.iserr).?, true), + .iserr => return analyzeInstIsErr(mod, scope, old_inst.castTag(.iserr).?), .boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?), .typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?), .optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?), diff --git a/test/stage2/test.zig b/test/stage2/test.zig index beb40f8e95..50203c7ee9 100644 --- a/test/stage2/test.zig +++ b/test/stage2/test.zig @@ -845,6 +845,65 @@ pub fn addCases(ctx: *TestContext) !void { , "", ); + + // 64bit set stack + case.addCompareOutput( + \\export fn _start() noreturn { + \\ var i: u64 = 0xFFEEDDCCBBAA9988; + \\ assert(i == 0xFFEEDDCCBBAA9988); + \\ + \\ exit(); + \\} + \\ + \\pub fn assert(ok: bool) void { + \\ if (!ok) unreachable; // assertion failure + \\} + \\ + \\fn exit() noreturn { + \\ asm volatile ("syscall" + \\ : + \\ : [number] "{rax}" (231), + \\ [arg1] "{rdi}" (0) + \\ : "rcx", "r11", "memory" + \\ ); + \\ unreachable; + \\} + , + "", + ); + + // Basic for loop + case.addCompareOutput( + \\export fn _start() noreturn { + \\ for ("hello") |_| print(); + \\ + \\ exit(); + \\} + \\ + \\fn print() void { + \\ asm volatile ("syscall" + \\ : + \\ : [number] "{rax}" (1), + \\ [arg1] "{rdi}" (1), + \\ [arg2] "{rsi}" (@ptrToInt("hello\n")), + \\ [arg3] "{rdx}" (6) + \\ : "rcx", "r11", "memory" + \\ ); + \\ return; + \\} + \\ + \\fn exit() noreturn { + \\ asm volatile ("syscall" + \\ : + \\ : [number] "{rax}" (231), + \\ [arg1] "{rdi}" (0) + \\ : "rcx", "r11", "memory" + \\ ); + \\ unreachable; + \\} + , + "hello\nhello\nhello\nhello\nhello\n", + ); } { -- cgit v1.2.3 From d3e5105eccaa1149a90f8d640d7f5cf90dd46bca Mon Sep 17 00:00:00 2001 From: Tadeo Kondrak Date: Sun, 30 Aug 2020 17:18:25 -0600 Subject: std.zig.ast: make getTrailer/setTrailer private and add getters/setters --- lib/std/zig/ast.zig | 238 +++++++++++++++++++++++++++++++++++----- lib/std/zig/render.zig | 76 ++++++------- src-self-hosted/Module.zig | 46 ++++---- src-self-hosted/astgen.zig | 12 +- src-self-hosted/codegen.zig | 2 +- src-self-hosted/link/Elf.zig | 4 +- src-self-hosted/translate_c.zig | 10 +- 7 files changed, 288 insertions(+), 100 deletions(-) (limited to 'src-self-hosted/codegen.zig') diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig index c235c92585..404e8c413a 100644 --- a/lib/std/zig/ast.zig +++ b/lib/std/zig/ast.zig @@ -915,18 +915,106 @@ pub const Node = struct { init_node: *Node, }); + pub fn getDocComments(self: *const VarDecl) ?*DocComment { + return self.getTrailer(.doc_comments); + } + + pub fn setDocComments(self: *VarDecl, value: *DocComment) void { + self.setTrailer(.doc_comments, value); + } + + pub fn getVisibToken(self: *const VarDecl) ?TokenIndex { + return self.getTrailer(.visib_token); + } + + pub fn setVisibToken(self: *VarDecl, value: TokenIndex) void { + self.setTrailer(.visib_token, value); + } + + pub fn getThreadLocalToken(self: *const VarDecl) ?TokenIndex { + return self.getTrailer(.thread_local_token); + } + + pub fn setThreadLocalToken(self: *VarDecl, value: TokenIndex) void { + self.setTrailer(.thread_local_token, value); + } + + pub fn getEqToken(self: *const VarDecl) ?TokenIndex 
{ + return self.getTrailer(.eq_token); + } + + pub fn setEqToken(self: *VarDecl, value: TokenIndex) void { + self.setTrailer(.eq_token, value); + } + + pub fn getComptimeToken(self: *const VarDecl) ?TokenIndex { + return self.getTrailer(.comptime_token); + } + + pub fn setComptimeToken(self: *VarDecl, value: TokenIndex) void { + self.setTrailer(.comptime_token, value); + } + + pub fn getExternExportToken(self: *const VarDecl) ?TokenIndex { + return self.getTrailer(.extern_export_token); + } + + pub fn setExternExportToken(self: *VarDecl, value: TokenIndex) void { + self.setTrailer(.extern_export_token, value); + } + + pub fn getLibName(self: *const VarDecl) ?*Node { + return self.getTrailer(.lib_name); + } + + pub fn setLibName(self: *VarDecl, value: *Node) void { + self.setTrailer(.lib_name, value); + } + + pub fn getTypeNode(self: *const VarDecl) ?*Node { + return self.getTrailer(.type_node); + } + + pub fn setTypeNode(self: *VarDecl, value: *Node) void { + self.setTrailer(.type_node, value); + } + + pub fn getAlignNode(self: *const VarDecl) ?*Node { + return self.getTrailer(.align_node); + } + + pub fn setAlignNode(self: *VarDecl, value: *Node) void { + self.setTrailer(.align_node, value); + } + + pub fn getSectionNode(self: *const VarDecl) ?*Node { + return self.getTrailer(.section_node); + } + + pub fn setSectionNode(self: *VarDecl, value: *Node) void { + self.setTrailer(.section_node, value); + } + + pub fn getInitNode(self: *const VarDecl) ?*Node { + return self.getTrailer(.init_node); + } + + pub fn setInitNode(self: *VarDecl, value: *Node) void { + self.setTrailer(.init_node, value); + } + pub const RequiredFields = struct { mut_token: TokenIndex, name_token: TokenIndex, semicolon_token: TokenIndex, }; - pub fn getTrailer(self: *const VarDecl, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) { + fn getTrailer(self: *const VarDecl, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) { const trailers_start = @ptrCast([*]const u8, self) + @sizeOf(VarDecl); return self.trailer_flags.get(trailers_start, field); } - pub fn setTrailer(self: *VarDecl, comptime field: TrailerFlags.FieldEnum, value: TrailerFlags.Field(field)) void { + fn setTrailer(self: *VarDecl, comptime field: TrailerFlags.FieldEnum, value: TrailerFlags.Field(field)) void { const trailers_start = @ptrCast([*]u8, self) + @sizeOf(VarDecl); self.trailer_flags.set(trailers_start, field, value); } @@ -954,22 +1042,22 @@ pub const Node = struct { pub fn iterate(self: *const VarDecl, index: usize) ?*Node { var i = index; - if (self.getTrailer(.type_node)) |type_node| { + if (self.getTypeNode()) |type_node| { if (i < 1) return type_node; i -= 1; } - if (self.getTrailer(.align_node)) |align_node| { + if (self.getAlignNode()) |align_node| { if (i < 1) return align_node; i -= 1; } - if (self.getTrailer(.section_node)) |section_node| { + if (self.getSectionNode()) |section_node| { if (i < 1) return section_node; i -= 1; } - if (self.getTrailer(.init_node)) |init_node| { + if (self.getInitNode()) |init_node| { if (i < 1) return init_node; i -= 1; } @@ -978,11 +1066,11 @@ pub const Node = struct { } pub fn firstToken(self: *const VarDecl) TokenIndex { - if (self.getTrailer(.visib_token)) |visib_token| return visib_token; - if (self.getTrailer(.thread_local_token)) |thread_local_token| return thread_local_token; - if (self.getTrailer(.comptime_token)) |comptime_token| return comptime_token; - if (self.getTrailer(.extern_export_token)) |extern_export_token| return extern_export_token; - 
assert(self.getTrailer(.lib_name) == null); + if (self.getVisibToken()) |visib_token| return visib_token; + if (self.getThreadLocalToken()) |thread_local_token| return thread_local_token; + if (self.getComptimeToken()) |comptime_token| return comptime_token; + if (self.getExternExportToken()) |extern_export_token| return extern_export_token; + assert(self.getLibName() == null); return self.mut_token; } @@ -1320,17 +1408,109 @@ pub const Node = struct { std.debug.print("{*} flags: {b} name_token: {} {*} params_len: {}\n", .{ self, self.trailer_flags.bits, - self.getTrailer(.name_token), + self.getNameToken(), self.trailer_flags.ptrConst(trailers_start, .name_token), self.params_len, }); } - pub fn body(self: *const FnProto) ?*Node { + pub fn getDocComments(self: *const FnProto) ?*DocComment { + return self.getTrailer(.doc_comments); + } + + pub fn setDocComments(self: *FnProto, value: *DocComment) void { + self.setTrailer(.doc_comments, value); + } + + pub fn getBodyNode(self: *const FnProto) ?*Node { return self.getTrailer(.body_node); } - pub fn getTrailer(self: *const FnProto, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) { + pub fn setBodyNode(self: *FnProto, value: *Node) void { + self.setTrailer(.body_node, value); + } + + pub fn getLibName(self: *const FnProto) ?*Node { + return self.getTrailer(.lib_name); + } + + pub fn setLibName(self: *FnProto, value: *Node) void { + self.setTrailer(.lib_name, value); + } + + pub fn getAlignExpr(self: *const FnProto) ?*Node { + return self.getTrailer(.align_expr); + } + + pub fn setAlignExpr(self: *FnProto, value: *Node) void { + self.setTrailer(.align_expr, value); + } + + pub fn getSectionExpr(self: *const FnProto) ?*Node { + return self.getTrailer(.section_expr); + } + + pub fn setSectionExpr(self: *FnProto, value: *Node) void { + self.setTrailer(.section_expr, value); + } + + pub fn getCallconvExpr(self: *const FnProto) ?*Node { + return self.getTrailer(.callconv_expr); + } + + pub fn setCallconvExpr(self: *FnProto, value: *Node) void { + self.setTrailer(.callconv_expr, value); + } + + pub fn getVisibToken(self: *const FnProto) ?TokenIndex { + return self.getTrailer(.visib_token); + } + + pub fn setVisibToken(self: *FnProto, value: TokenIndex) void { + self.setTrailer(.visib_token, value); + } + + pub fn getNameToken(self: *const FnProto) ?TokenIndex { + return self.getTrailer(.name_token); + } + + pub fn setNameToken(self: *FnProto, value: TokenIndex) void { + self.setTrailer(.name_token, value); + } + + pub fn getVarArgsToken(self: *const FnProto) ?TokenIndex { + return self.getTrailer(.var_args_token); + } + + pub fn setVarArgsToken(self: *FnProto, value: TokenIndex) void { + self.setTrailer(.var_args_token, value); + } + + pub fn getExternExportInlineToken(self: *const FnProto) ?TokenIndex { + return self.getTrailer(.extern_export_inline_token); + } + + pub fn setExternExportInlineToken(self: *FnProto, value: TokenIndex) void { + self.setTrailer(.extern_export_inline_token, value); + } + + pub fn getIsExternPrototype(self: *const FnProto) ?void { + return self.getTrailer(.is_extern_prototype); + } + + pub fn setIsExternPrototype(self: *FnProto, value: void) void { + self.setTrailer(.is_extern_prototype, value); + } + + pub fn getIsAsync(self: *const FnProto) ?void { + return self.getTrailer(.is_async); + } + + pub fn setIsAsync(self: *FnProto, value: void) void { + self.setTrailer(.is_async, value); + } + + fn getTrailer(self: *const FnProto, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) { const 
trailers_start = @alignCast( @alignOf(ParamDecl), @ptrCast([*]const u8, self) + @sizeOf(FnProto) + @sizeOf(ParamDecl) * self.params_len, @@ -1338,7 +1518,7 @@ pub const Node = struct { return self.trailer_flags.get(trailers_start, field); } - pub fn setTrailer(self: *FnProto, comptime field: TrailerFlags.FieldEnum, value: TrailerFlags.Field(field)) void { + fn setTrailer(self: *FnProto, comptime field: TrailerFlags.FieldEnum, value: TrailerFlags.Field(field)) void { const trailers_start = @alignCast( @alignOf(ParamDecl), @ptrCast([*]u8, self) + @sizeOf(FnProto) + @sizeOf(ParamDecl) * self.params_len, @@ -1376,7 +1556,7 @@ pub const Node = struct { pub fn iterate(self: *const FnProto, index: usize) ?*Node { var i = index; - if (self.getTrailer(.lib_name)) |lib_name| { + if (self.getLibName()) |lib_name| { if (i < 1) return lib_name; i -= 1; } @@ -1394,12 +1574,12 @@ pub const Node = struct { } i -= params_len; - if (self.getTrailer(.align_expr)) |align_expr| { + if (self.getAlignExpr()) |align_expr| { if (i < 1) return align_expr; i -= 1; } - if (self.getTrailer(.section_expr)) |section_expr| { + if (self.getSectionExpr()) |section_expr| { if (i < 1) return section_expr; i -= 1; } @@ -1412,7 +1592,7 @@ pub const Node = struct { .Invalid => {}, } - if (self.body()) |body_node| { + if (self.getBodyNode()) |body_node| { if (i < 1) return body_node; i -= 1; } @@ -1421,14 +1601,14 @@ pub const Node = struct { } pub fn firstToken(self: *const FnProto) TokenIndex { - if (self.getTrailer(.visib_token)) |visib_token| return visib_token; - if (self.getTrailer(.extern_export_inline_token)) |extern_export_inline_token| return extern_export_inline_token; - assert(self.getTrailer(.lib_name) == null); + if (self.getVisibToken()) |visib_token| return visib_token; + if (self.getExternExportInlineToken()) |extern_export_inline_token| return extern_export_inline_token; + assert(self.getLibName() == null); return self.fn_token; } pub fn lastToken(self: *const FnProto) TokenIndex { - if (self.body()) |body_node| return body_node.lastToken(); + if (self.getBodyNode()) |body_node| return body_node.lastToken(); switch (self.return_type) { .Explicit, .InferErrorSet => |node| return node.lastToken(), .Invalid => |tok| return tok, @@ -2676,16 +2856,24 @@ pub const Node = struct { return self.getTrailer(.rhs); } + pub fn setRHS(self: *ControlFlowExpression, value: *Node) void { + self.setTrailer(.rhs, value); + } + pub fn getLabel(self: *const ControlFlowExpression) ?TokenIndex { return self.getTrailer(.label); } - pub fn getTrailer(self: *const ControlFlowExpression, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) { + pub fn setLabel(self: *ControlFlowExpression, value: TokenIndex) void { + self.setTrailer(.label, value); + } + + fn getTrailer(self: *const ControlFlowExpression, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) { const trailers_start = @ptrCast([*]const u8, self) + @sizeOf(ControlFlowExpression); return self.trailer_flags.get(trailers_start, field); } - pub fn setTrailer(self: *ControlFlowExpression, comptime field: TrailerFlags.FieldEnum, value: TrailerFlags.Field(field)) void { + fn setTrailer(self: *ControlFlowExpression, comptime field: TrailerFlags.FieldEnum, value: TrailerFlags.Field(field)) void { const trailers_start = @ptrCast([*]u8, self) + @sizeOf(ControlFlowExpression); self.trailer_flags.set(trailers_start, field, value); } diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 4f280dbefc..5a62cdd2ce 100644 --- a/lib/std/zig/render.zig +++ 
b/lib/std/zig/render.zig
@@ -232,9 +232,9 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr
         .FnProto => {
             const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
-            try renderDocComments(tree, stream, fn_proto, fn_proto.getTrailer(.doc_comments), indent, start_col);
+            try renderDocComments(tree, stream, fn_proto, fn_proto.getDocComments(), indent, start_col);
-            if (fn_proto.getTrailer(.body_node)) |body_node| {
+            if (fn_proto.getBodyNode()) |body_node| {
                 try renderExpression(allocator, stream, tree, indent, start_col, decl, .Space);
                 try renderExpression(allocator, stream, tree, indent, start_col, body_node, space);
             } else {
@@ -257,7 +257,7 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr
         .VarDecl => {
             const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl);
-            try renderDocComments(tree, stream, var_decl, var_decl.getTrailer(.doc_comments), indent, start_col);
+            try renderDocComments(tree, stream, var_decl, var_decl.getDocComments(), indent, start_col);
             try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
         },
@@ -1520,23 +1520,23 @@ fn renderExpression(
         .FnProto => {
             const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", base);
-            if (fn_proto.getTrailer(.visib_token)) |visib_token_index| {
+            if (fn_proto.getVisibToken()) |visib_token_index| {
                 const visib_token = tree.token_ids[visib_token_index];
                 assert(visib_token == .Keyword_pub or visib_token == .Keyword_export);
                 try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub
             }
-            if (fn_proto.getTrailer(.extern_export_inline_token)) |extern_export_inline_token| {
-                if (fn_proto.getTrailer(.is_extern_prototype) == null)
+            if (fn_proto.getExternExportInlineToken()) |extern_export_inline_token| {
+                if (fn_proto.getIsExternPrototype() == null)
                     try renderToken(tree, stream, extern_export_inline_token, indent, start_col, Space.Space); // extern/export/inline
             }
-            if (fn_proto.getTrailer(.lib_name)) |lib_name| {
+            if (fn_proto.getLibName()) |lib_name| {
                 try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space);
             }
-            const lparen = if (fn_proto.getTrailer(.name_token)) |name_token| blk: {
+            const lparen = if (fn_proto.getNameToken()) |name_token| blk: {
                 try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
                 try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name
                 break :blk tree.nextToken(name_token);
@@ -1549,11 +1549,11 @@ fn renderExpression(
             const rparen = tree.prevToken(
             // the first token for the annotation expressions is the left
             // parenthesis, hence the need for two prevToken
-                if (fn_proto.getTrailer(.align_expr)) |align_expr|
+                if (fn_proto.getAlignExpr()) |align_expr|
                     tree.prevToken(tree.prevToken(align_expr.firstToken()))
-                else if (fn_proto.getTrailer(.section_expr)) |section_expr|
+                else if (fn_proto.getSectionExpr()) |section_expr|
                     tree.prevToken(tree.prevToken(section_expr.firstToken()))
-                else if (fn_proto.getTrailer(.callconv_expr)) |callconv_expr|
+                else if (fn_proto.getCallconvExpr()) |callconv_expr|
                     tree.prevToken(tree.prevToken(callconv_expr.firstToken()))
                 else switch (fn_proto.return_type) {
                     .Explicit => |node| node.firstToken(),
@@ -1574,12 +1574,12 @@ fn renderExpression(
                 for (fn_proto.params()) |param_decl, i| {
                     try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl, Space.None);
-                    if (i + 1 < fn_proto.params_len or fn_proto.getTrailer(.var_args_token) != null) {
+                    if (i + 1 < fn_proto.params_len or fn_proto.getVarArgsToken() != null) {
                         const comma = tree.nextToken(param_decl.lastToken());
                         try renderToken(tree, stream, comma, indent, start_col, Space.Space); // ,
                     }
                 }
-                if (fn_proto.getTrailer(.var_args_token)) |var_args_token| {
+                if (fn_proto.getVarArgsToken()) |var_args_token| {
                     try renderToken(tree, stream, var_args_token, indent, start_col, Space.None);
                 }
             } else {
@@ -1591,7 +1591,7 @@ fn renderExpression(
                     try stream.writeByteNTimes(' ', new_indent);
                     try renderParamDecl(allocator, stream, tree, new_indent, start_col, param_decl, Space.Comma);
                 }
-                if (fn_proto.getTrailer(.var_args_token)) |var_args_token| {
+                if (fn_proto.getVarArgsToken()) |var_args_token| {
                     try stream.writeByteNTimes(' ', new_indent);
                     try renderToken(tree, stream, var_args_token, new_indent, start_col, Space.Comma);
                 }
@@ -1600,7 +1600,7 @@ fn renderExpression(
             try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
-            if (fn_proto.getTrailer(.align_expr)) |align_expr| {
+            if (fn_proto.getAlignExpr()) |align_expr| {
                 const align_rparen = tree.nextToken(align_expr.lastToken());
                 const align_lparen = tree.prevToken(align_expr.firstToken());
                 const align_kw = tree.prevToken(align_lparen);
@@ -1611,7 +1611,7 @@ fn renderExpression(
                 try renderToken(tree, stream, align_rparen, indent, start_col, Space.Space); // )
             }
-            if (fn_proto.getTrailer(.section_expr)) |section_expr| {
+            if (fn_proto.getSectionExpr()) |section_expr| {
                 const section_rparen = tree.nextToken(section_expr.lastToken());
                 const section_lparen = tree.prevToken(section_expr.firstToken());
                 const section_kw = tree.prevToken(section_lparen);
@@ -1622,7 +1622,7 @@ fn renderExpression(
                 try renderToken(tree, stream, section_rparen, indent, start_col, Space.Space); // )
             }
-            if (fn_proto.getTrailer(.callconv_expr)) |callconv_expr| {
+            if (fn_proto.getCallconvExpr()) |callconv_expr| {
                 const callconv_rparen = tree.nextToken(callconv_expr.lastToken());
                 const callconv_lparen = tree.prevToken(callconv_expr.firstToken());
                 const callconv_kw = tree.prevToken(callconv_lparen);
@@ -1631,9 +1631,9 @@ fn renderExpression(
                 try renderToken(tree, stream, callconv_lparen, indent, start_col, Space.None); // (
                 try renderExpression(allocator, stream, tree, indent, start_col, callconv_expr, Space.None);
                 try renderToken(tree, stream, callconv_rparen, indent, start_col, Space.Space); // )
-            } else if (fn_proto.getTrailer(.is_extern_prototype) != null) {
+            } else if (fn_proto.getIsExternPrototype() != null) {
                 try stream.writeAll("callconv(.C) ");
-            } else if (fn_proto.getTrailer(.is_async) != null) {
+            } else if (fn_proto.getIsAsync() != null) {
                 try stream.writeAll("callconv(.Async) ");
             }
@@ -2221,69 +2221,69 @@ fn renderVarDecl(
     start_col: *usize,
     var_decl: *ast.Node.VarDecl,
 ) (@TypeOf(stream).Error || Error)!void {
-    if (var_decl.getTrailer(.visib_token)) |visib_token| {
+    if (var_decl.getVisibToken()) |visib_token| {
         try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
     }
-    if (var_decl.getTrailer(.extern_export_token)) |extern_export_token| {
+    if (var_decl.getExternExportToken()) |extern_export_token| {
         try renderToken(tree, stream, extern_export_token, indent, start_col, Space.Space); // extern
-        if (var_decl.getTrailer(.lib_name)) |lib_name| {
+        if (var_decl.getLibName()) |lib_name| {
             try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); // "lib"
         }
     }
-    if (var_decl.getTrailer(.comptime_token)) |comptime_token| {
+    if (var_decl.getComptimeToken()) |comptime_token| {
         try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); // comptime
     }
-    if (var_decl.getTrailer(.thread_local_token)) |thread_local_token| {
+    if (var_decl.getThreadLocalToken()) |thread_local_token| {
         try renderToken(tree, stream, thread_local_token, indent, start_col, Space.Space); // threadlocal
     }
     try renderToken(tree, stream, var_decl.mut_token, indent, start_col, Space.Space); // var
-    const name_space = if (var_decl.getTrailer(.type_node) == null and
-        (var_decl.getTrailer(.align_node) != null or
-        var_decl.getTrailer(.section_node) != null or
-        var_decl.getTrailer(.init_node) != null))
+    const name_space = if (var_decl.getTypeNode() == null and
+        (var_decl.getAlignNode() != null or
+        var_decl.getSectionNode() != null or
+        var_decl.getInitNode() != null))
         Space.Space
     else
         Space.None;
     try renderToken(tree, stream, var_decl.name_token, indent, start_col, name_space);
-    if (var_decl.getTrailer(.type_node)) |type_node| {
+    if (var_decl.getTypeNode()) |type_node| {
         try renderToken(tree, stream, tree.nextToken(var_decl.name_token), indent, start_col, Space.Space);
-        const s = if (var_decl.getTrailer(.align_node) != null or
-            var_decl.getTrailer(.section_node) != null or
-            var_decl.getTrailer(.init_node) != null) Space.Space else Space.None;
+        const s = if (var_decl.getAlignNode() != null or
+            var_decl.getSectionNode() != null or
+            var_decl.getInitNode() != null) Space.Space else Space.None;
         try renderExpression(allocator, stream, tree, indent, start_col, type_node, s);
     }
-    if (var_decl.getTrailer(.align_node)) |align_node| {
+    if (var_decl.getAlignNode()) |align_node| {
         const lparen = tree.prevToken(align_node.firstToken());
         const align_kw = tree.prevToken(lparen);
         const rparen = tree.nextToken(align_node.lastToken());
         try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
         try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
         try renderExpression(allocator, stream, tree, indent, start_col, align_node, Space.None);
-        const s = if (var_decl.getTrailer(.section_node) != null or var_decl.getTrailer(.init_node) != null) Space.Space else Space.None;
+        const s = if (var_decl.getSectionNode() != null or var_decl.getInitNode() != null) Space.Space else Space.None;
         try renderToken(tree, stream, rparen, indent, start_col, s); // )
     }
-    if (var_decl.getTrailer(.section_node)) |section_node| {
+    if (var_decl.getSectionNode()) |section_node| {
         const lparen = tree.prevToken(section_node.firstToken());
         const section_kw = tree.prevToken(lparen);
         const rparen = tree.nextToken(section_node.lastToken());
         try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // linksection
         try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
         try renderExpression(allocator, stream, tree, indent, start_col, section_node, Space.None);
-        const s = if (var_decl.getTrailer(.init_node) != null) Space.Space else Space.None;
+        const s = if (var_decl.getInitNode() != null) Space.Space else Space.None;
         try renderToken(tree, stream, rparen, indent, start_col, s); // )
     }
-    if (var_decl.getTrailer(.init_node)) |init_node| {
+    if (var_decl.getInitNode()) |init_node| {
         const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space;
-        try renderToken(tree, stream, var_decl.getTrailer(.eq_token).?, indent, start_col, s); // =
+        try renderToken(tree, stream, var_decl.getEqToken().?, indent, start_col, s); // =
         try renderExpression(allocator, stream, tree, indent, start_col, init_node, Space.None);
     }
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 005f44a270..c476c307d2 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1256,8 +1256,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
         };
         defer fn_type_scope.instructions.deinit(self.gpa);
-        decl.is_pub = fn_proto.getTrailer(.visib_token) != null;
-        const body_node = fn_proto.getTrailer(.body_node) orelse
+        decl.is_pub = fn_proto.getVisibToken() != null;
+        const body_node = fn_proto.getBodyNode() orelse
             return self.failTok(&fn_type_scope.base, fn_proto.fn_token, "TODO implement extern functions", .{});
         const param_decls = fn_proto.params();
@@ -1276,19 +1276,19 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
             };
             param_types[i] = try astgen.expr(self, &fn_type_scope.base, type_type_rl, param_type_node);
         }
-        if (fn_proto.getTrailer(.var_args_token)) |var_args_token| {
+        if (fn_proto.getVarArgsToken()) |var_args_token| {
             return self.failTok(&fn_type_scope.base, var_args_token, "TODO implement var args", .{});
         }
-        if (fn_proto.getTrailer(.lib_name)) |lib_name| {
+        if (fn_proto.getLibName()) |lib_name| {
            return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{});
         }
-        if (fn_proto.getTrailer(.align_expr)) |align_expr| {
+        if (fn_proto.getAlignExpr()) |align_expr| {
            return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{});
         }
-        if (fn_proto.getTrailer(.section_expr)) |sect_expr| {
+        if (fn_proto.getSectionExpr()) |sect_expr| {
            return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{});
         }
-        if (fn_proto.getTrailer(.callconv_expr)) |callconv_expr| {
+        if (fn_proto.getCallconvExpr()) |callconv_expr| {
             return self.failNode(
                 &fn_type_scope.base,
                 callconv_expr,
@@ -1430,10 +1430,10 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
             self.bin_file.freeDecl(decl);
         }
-        if (fn_proto.getTrailer(.extern_export_inline_token)) |maybe_export_token| {
+        if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
             if (tree.token_ids[maybe_export_token] == .Keyword_export) {
                 const export_src = tree.token_locs[maybe_export_token].start;
-                const name_loc = tree.token_locs[fn_proto.getTrailer(.name_token).?];
+                const name_loc = tree.token_locs[fn_proto.getNameToken().?];
                 const name = tree.tokenSliceLoc(name_loc);
                 // The scope needs to have the decl in it.
                 try self.analyzeExport(&block_scope.base, export_src, name, decl);
@@ -1460,37 +1460,37 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
         };
         defer block_scope.instructions.deinit(self.gpa);
-        decl.is_pub = var_decl.getTrailer(.visib_token) != null;
+        decl.is_pub = var_decl.getVisibToken() != null;
         const is_extern = blk: {
-            const maybe_extern_token = var_decl.getTrailer(.extern_export_token) orelse
+            const maybe_extern_token = var_decl.getExternExportToken() orelse
                 break :blk false;
             if (tree.token_ids[maybe_extern_token] != .Keyword_extern) break :blk false;
-            if (var_decl.getTrailer(.init_node)) |some| {
+            if (var_decl.getInitNode()) |some| {
                 return self.failNode(&block_scope.base, some, "extern variables have no initializers", .{});
             }
             break :blk true;
         };
-        if (var_decl.getTrailer(.lib_name)) |lib_name| {
+        if (var_decl.getLibName()) |lib_name| {
             assert(is_extern);
             return self.failNode(&block_scope.base, lib_name, "TODO implement function library name", .{});
         }
         const is_mutable = tree.token_ids[var_decl.mut_token] == .Keyword_var;
-        const is_threadlocal = if (var_decl.getTrailer(.thread_local_token)) |some| blk: {
+        const is_threadlocal = if (var_decl.getThreadLocalToken()) |some| blk: {
             if (!is_mutable) {
                 return self.failTok(&block_scope.base, some, "threadlocal variable cannot be constant", .{});
             }
             break :blk true;
         } else false;
-        assert(var_decl.getTrailer(.comptime_token) == null);
-        if (var_decl.getTrailer(.align_node)) |align_expr| {
+        assert(var_decl.getComptimeToken() == null);
+        if (var_decl.getAlignNode()) |align_expr| {
             return self.failNode(&block_scope.base, align_expr, "TODO implement function align expression", .{});
         }
-        if (var_decl.getTrailer(.section_node)) |sect_expr| {
+        if (var_decl.getSectionNode()) |sect_expr| {
             return self.failNode(&block_scope.base, sect_expr, "TODO implement function section expression", .{});
         }
         const explicit_type = blk: {
-            const type_node = var_decl.getTrailer(.type_node) orelse
+            const type_node = var_decl.getTypeNode() orelse
                 break :blk null;
             // Temporary arena for the zir instructions.
@@ -1517,7 +1517,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
         };
         var var_type: Type = undefined;
-        const value: ?Value = if (var_decl.getTrailer(.init_node)) |init_node| blk: {
+        const value: ?Value = if (var_decl.getInitNode()) |init_node| blk: {
             var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
             defer gen_scope_arena.deinit();
             var gen_scope: Scope.GenZIR = .{
@@ -1602,7 +1602,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
         decl.analysis = .complete;
         decl.generation = self.generation;
-        if (var_decl.getTrailer(.extern_export_token)) |maybe_export_token| {
+        if (var_decl.getExternExportToken()) |maybe_export_token| {
             if (tree.token_ids[maybe_export_token] == .Keyword_export) {
                 const export_src = tree.token_locs[maybe_export_token].start;
                 const name_loc = tree.token_locs[var_decl.name_token];
@@ -1768,7 +1768,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
     for (decls) |src_decl, decl_i| {
         if (src_decl.cast(ast.Node.FnProto)) |fn_proto| {
             // We will create a Decl for it regardless of analysis status.
-            const name_tok = fn_proto.getTrailer(.name_token) orelse {
+            const name_tok = fn_proto.getNameToken() orelse {
                 @panic("TODO missing function name");
             };
@@ -1804,7 +1804,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
             } else {
                 const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
                 root_scope.decls.appendAssumeCapacity(new_decl);
-                if (fn_proto.getTrailer(.extern_export_inline_token)) |maybe_export_token| {
+                if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
                     if (tree.token_ids[maybe_export_token] == .Keyword_export) {
                         self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
                     }
@@ -1831,7 +1831,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
             } else {
                 const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
                 root_scope.decls.appendAssumeCapacity(new_decl);
-                if (var_decl.getTrailer(.extern_export_token)) |maybe_export_token| {
+                if (var_decl.getExternExportToken()) |maybe_export_token| {
                     if (tree.token_ids[maybe_export_token] == .Keyword_export) {
                         self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
                     }
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 294a9be9c3..472f8deaa0 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -451,16 +451,16 @@ fn varDecl(
     block_arena: *Allocator,
 ) InnerError!*Scope {
     // TODO implement detection of shadowing
-    if (node.getTrailer(.comptime_token)) |comptime_token| {
+    if (node.getComptimeToken()) |comptime_token| {
         return mod.failTok(scope, comptime_token, "TODO implement comptime locals", .{});
     }
-    if (node.getTrailer(.align_node)) |align_node| {
+    if (node.getAlignNode()) |align_node| {
         return mod.failNode(scope, align_node, "TODO implement alignment on locals", .{});
     }
     const tree = scope.tree();
     const name_src = tree.token_locs[node.name_token].start;
     const ident_name = try identifierTokenString(mod, scope, node.name_token);
-    const init_node = node.getTrailer(.init_node) orelse
+    const init_node = node.getInitNode() orelse
         return mod.fail(scope, name_src, "variables must be initialized", .{});
     switch (tree.token_ids[node.mut_token]) {
@@ -469,7 +469,7 @@ fn varDecl(
             // or an rvalue as a result location. If it is an rvalue, we can use the instruction as
             // the variable, no memory location needed.
             const result_loc = if (nodeMayNeedMemoryLocation(init_node)) r: {
-                if (node.getTrailer(.type_node)) |type_node| {
+                if (node.getTypeNode()) |type_node| {
                     const type_inst = try typeExpr(mod, scope, type_node);
                     const alloc = try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
                     break :r ResultLoc{ .ptr = alloc };
@@ -478,7 +478,7 @@ fn varDecl(
                     break :r ResultLoc{ .inferred_ptr = alloc };
                 }
             } else r: {
-                if (node.getTrailer(.type_node)) |type_node|
+                if (node.getTypeNode()) |type_node|
                     break :r ResultLoc{ .ty = try typeExpr(mod, scope, type_node) }
                 else
                     break :r .none;
@@ -494,7 +494,7 @@ fn varDecl(
             return &sub_scope.base;
         },
         .Keyword_var => {
-            const var_data: struct { result_loc: ResultLoc, alloc: *zir.Inst } = if (node.getTrailer(.type_node)) |type_node| a: {
+            const var_data: struct { result_loc: ResultLoc, alloc: *zir.Inst } = if (node.getTypeNode()) |type_node| a: {
                 const type_inst = try typeExpr(mod, scope, type_node);
                 const alloc = try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
                 break :a .{ .alloc = alloc, .result_loc = .{ .ptr = alloc } };
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 7a24af976d..82c06d8003 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -439,7 +439,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             if (module_fn.owner_decl.scope.cast(Module.Scope.File)) |scope_file| {
                 const tree = scope_file.contents.tree;
                 const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?;
-                const block = fn_proto.body().?.castTag(.Block).?;
+                const block = fn_proto.getBodyNode().?.castTag(.Block).?;
                 const lbrace_src = tree.token_locs[block.lbrace].start;
                 const rbrace_src = tree.token_locs[block.rbrace].start;
                 break :blk .{ .lbrace_src = lbrace_src, .rbrace_src = rbrace_src, .source = tree.source };
diff --git a/src-self-hosted/link/Elf.zig b/src-self-hosted/link/Elf.zig
index f2a3218118..8bf28557b4 100644
--- a/src-self-hosted/link/Elf.zig
+++ b/src-self-hosted/link/Elf.zig
@@ -1661,7 +1661,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
             // TODO Look into improving the performance here by adding a token-index-to-line
             // lookup table. Currently this involves scanning over the source code for newlines.
             const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?;
-            const block = fn_proto.body().?.castTag(.Block).?;
+            const block = fn_proto.getBodyNode().?.castTag(.Block).?;
             const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start);
             break :blk @intCast(u28, line_delta);
         } else if (decl.scope.cast(Module.Scope.ZIRModule)) |zir_module| {
@@ -2160,7 +2160,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
         // TODO Look into improving the performance here by adding a token-index-to-line
         // lookup table. Currently this involves scanning over the source code for newlines.
         const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?;
-        const block = fn_proto.body().?.castTag(.Block).?;
+        const block = fn_proto.getBodyNode().?.castTag(.Block).?;
         const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start);
         const casted_line_off = @intCast(u28, line_delta);
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 122415331e..888d21db9d 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -675,7 +675,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const ZigClangFunctionDecl) Error!void {
     }
     const body_node = try block_scope.complete(rp.c);
-    proto_node.setTrailer(.body_node, body_node);
+    proto_node.setBodyNode(body_node);
     return addTopLevelDecl(c, fn_name, &proto_node.base);
 }
@@ -4493,7 +4493,7 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: *ast.Node, proto_a
     const block_lbrace = try appendToken(c, .LBrace, "{");
     const return_kw = try appendToken(c, .Keyword_return, "return");
-    const unwrap_expr = try transCreateNodeUnwrapNull(c, ref.cast(ast.Node.VarDecl).?.getTrailer(.init_node).?);
+    const unwrap_expr = try transCreateNodeUnwrapNull(c, ref.cast(ast.Node.VarDecl).?.getInitNode().?);
     const call_expr = try c.createCall(unwrap_expr, fn_params.items.len);
     const call_params = call_expr.params();
@@ -6361,7 +6361,7 @@ fn getContainer(c: *Context, node: *ast.Node) ?*ast.Node {
             const ident = node.castTag(.Identifier).?;
             if (c.global_scope.sym_table.get(tokenSlice(c, ident.token))) |value| {
                 if (value.cast(ast.Node.VarDecl)) |var_decl|
-                    return getContainer(c, var_decl.getTrailer(.init_node).?);
+                    return getContainer(c, var_decl.getInitNode().?);
             }
         },
@@ -6390,7 +6390,7 @@ fn getContainerTypeOf(c: *Context, ref: *ast.Node) ?*ast.Node {
     if (ref.castTag(.Identifier)) |ident| {
         if (c.global_scope.sym_table.get(tokenSlice(c, ident.token))) |value| {
             if (value.cast(ast.Node.VarDecl)) |var_decl| {
-                if (var_decl.getTrailer(.type_node)) |ty|
+                if (var_decl.getTypeNode()) |ty|
                     return getContainer(c, ty);
             }
         }
@@ -6412,7 +6412,7 @@ fn getContainerTypeOf(c: *Context, ref: *ast.Node) ?*ast.Node {
 }
 fn getFnProto(c: *Context, ref: *ast.Node) ?*ast.Node.FnProto {
-    const init = if (ref.cast(ast.Node.VarDecl)) |v| v.getTrailer(.init_node).? else return null;
+    const init = if (ref.cast(ast.Node.VarDecl)) |v| v.getInitNode().? else return null;
     if (getContainerTypeOf(c, init)) |ty_node| {
         if (ty_node.castTag(.OptionalType)) |prefix| {
             if (prefix.rhs.cast(ast.Node.FnProto)) |fn_proto| {
--
cgit v1.2.3
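The mechanical change in every hunk above is the same: call sites stop going through the generic getTrailer(.field) / setTrailer(.field, value) interface on ast.Node.FnProto and ast.Node.VarDecl and use per-field accessors such as getBodyNode() / setBodyNode() instead. The sketch below is not the real std.zig.ast implementation (the actual nodes store optional trailing data out of band, not as plain optional fields); it is a minimal, hypothetical FnProtoSketch type that only illustrates the accessor shape the new call sites assume.

const std = @import("std");
const assert = std.debug.assert;

/// Stand-in for an AST node that a body could point at.
const Node = struct { tag_value: u32 };

/// Hypothetical node type with one optional trailing field. Only the accessor
/// shape matches the patch; the storage strategy here is deliberately naive.
const FnProtoSketch = struct {
    body_node: ?*Node = null,

    pub fn getBodyNode(self: FnProtoSketch) ?*Node {
        return self.body_node;
    }

    pub fn setBodyNode(self: *FnProtoSketch, node: *Node) void {
        self.body_node = node;
    }
};

pub fn main() void {
    var body = Node{ .tag_value = 42 };
    var proto = FnProtoSketch{};

    // translate_c.zig moves from `proto_node.setTrailer(.body_node, body_node)`
    // to `proto_node.setBodyNode(body_node)`:
    proto.setBodyNode(&body);

    // Module.zig, codegen.zig, and render.zig move from
    // `fn_proto.getTrailer(.body_node)` to `fn_proto.getBodyNode()`:
    const node = proto.getBodyNode() orelse unreachable;
    assert(node.tag_value == 42);
}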