path: root/src/codegen.zig
author     Andrew Kelley <andrew@ziglang.org>  2021-01-31 20:15:08 -0800
committer  GitHub <noreply@github.com>         2021-01-31 20:15:08 -0800
commit     bf76501b5d46277d3706a1f0b92ba52f2a47d894 (patch)
tree       7a40c904b9246092009c02f5b09a2a8732b69175  /src/codegen.zig
parent     fdc875ed0080cd2542a854a8cd6c627b25e9b7a4 (diff)
parent     0f5eda973e0c17b3f792cdb06674bf8d2863c8fb (diff)
download   zig-bf76501b5d46277d3706a1f0b92ba52f2a47d894.tar.gz
           zig-bf76501b5d46277d3706a1f0b92ba52f2a47d894.zip
Merge pull request #7847 from ziglang/astgen-rl-rework
stage2: rework astgen result locations
Diffstat (limited to 'src/codegen.zig')
-rw-r--r--  src/codegen.zig  57
1 file changed, 34 insertions(+), 23 deletions(-)
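Before the diff itself, a note on the dispatch pattern it touches: every IR instruction carries a tag, and castTag(.some_tag) yields the concrete instruction only when the tag matches, which is why each switch arm below unwraps with `.?` immediately after matching that same tag. The following is a minimal stand-alone sketch of that pattern using hypothetical toy types, not the compiler's actual ir.Inst definitions:

const std = @import("std");

// Hypothetical toy IR types, for illustration only.
const Tag = enum { bit_and, bit_or, bool_and, bool_or };

const Inst = struct {
    tag: Tag,

    // Returns the instruction only when its tag matches, else null -- the reason
    // every switch arm in the diff can unwrap the result with `.?` safely.
    fn castTag(self: *Inst, comptime t: Tag) ?*Inst {
        return if (self.tag == t) self else null;
    }
};

pub fn main() void {
    var inst = Inst{ .tag = .bit_and };
    switch (inst.tag) {
        // Mirrors the renamed snake_case tags (.bit_and, .bool_or, ...) in the diff below.
        .bit_and => {
            const concrete = inst.castTag(.bit_and).?; // tag just matched, cannot fail
            std.debug.print("dispatched {s}\n", .{@tagName(concrete.tag)});
        },
        else => {},
    }
}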
diff --git a/src/codegen.zig b/src/codegen.zig
index 1ca2bb2abe..362b04ab26 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -840,14 +840,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.arg => return self.genArg(inst.castTag(.arg).?),
.assembly => return self.genAsm(inst.castTag(.assembly).?),
.bitcast => return self.genBitCast(inst.castTag(.bitcast).?),
- .bitand => return self.genBitAnd(inst.castTag(.bitand).?),
- .bitor => return self.genBitOr(inst.castTag(.bitor).?),
+ .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?),
+ .bit_or => return self.genBitOr(inst.castTag(.bit_or).?),
.block => return self.genBlock(inst.castTag(.block).?),
.br => return self.genBr(inst.castTag(.br).?),
+ .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?),
.breakpoint => return self.genBreakpoint(inst.src),
- .brvoid => return self.genBrVoid(inst.castTag(.brvoid).?),
- .booland => return self.genBoolOp(inst.castTag(.booland).?),
- .boolor => return self.genBoolOp(inst.castTag(.boolor).?),
+ .br_void => return self.genBrVoid(inst.castTag(.br_void).?),
+ .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?),
+ .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?),
.call => return self.genCall(inst.castTag(.call).?),
.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt),
.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte),
@@ -1097,7 +1098,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.base.isUnused())
return MCValue.dead;
switch (arch) {
- .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bitand),
+ .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_and),
else => return self.fail(inst.base.src, "TODO implement bitwise and for {}", .{self.target.cpu.arch}),
}
}
@@ -1107,7 +1108,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.base.isUnused())
return MCValue.dead;
switch (arch) {
- .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bitor),
+ .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_or),
else => return self.fail(inst.base.src, "TODO implement bitwise or for {}", .{self.target.cpu.arch}),
}
}
@@ -1371,10 +1372,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, dst_reg, operand).toU32());
}
},
- .booland, .bitand => {
+ .bool_and, .bit_and => {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, dst_reg, operand).toU32());
},
- .boolor, .bitor => {
+ .bool_or, .bit_or => {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, dst_reg, operand).toU32());
},
.not, .xor => {
@@ -2441,17 +2442,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genBrBlockFlat(self: *Self, inst: *ir.Inst.BrBlockFlat) !MCValue {
+ try self.genBody(inst.body);
+ const last = inst.body.instructions[inst.body.instructions.len - 1];
+ return self.br(inst.base.src, inst.block, last);
+ }
+
fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue {
- if (inst.operand.ty.hasCodeGenBits()) {
- const operand = try self.resolveInst(inst.operand);
- const block_mcv = @bitCast(MCValue, inst.block.codegen.mcv);
- if (block_mcv == .none) {
- inst.block.codegen.mcv = @bitCast(AnyMCValue, operand);
- } else {
- try self.setRegOrMem(inst.base.src, inst.block.base.ty, block_mcv, operand);
- }
- }
- return self.brVoid(inst.base.src, inst.block);
+ return self.br(inst.base.src, inst.block, inst.operand);
}
fn genBrVoid(self: *Self, inst: *ir.Inst.BrVoid) !MCValue {
@@ -2464,20 +2462,33 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (arch) {
.x86_64 => switch (inst.base.tag) {
// lhs AND rhs
- .booland => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 4, 0x20),
+ .bool_and => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 4, 0x20),
// lhs OR rhs
- .boolor => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 1, 0x08),
+ .bool_or => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 1, 0x08),
else => unreachable, // Not a boolean operation
},
.arm, .armeb => switch (inst.base.tag) {
- .booland => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .booland),
- .boolor => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .boolor),
+ .bool_and => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_and),
+ .bool_or => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_or),
else => unreachable, // Not a boolean operation
},
else => return self.fail(inst.base.src, "TODO implement boolean operations for {}", .{self.target.cpu.arch}),
}
}
+ fn br(self: *Self, src: usize, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue {
+ if (operand.ty.hasCodeGenBits()) {
+ const operand_mcv = try self.resolveInst(operand);
+ const block_mcv = @bitCast(MCValue, block.codegen.mcv);
+ if (block_mcv == .none) {
+ block.codegen.mcv = @bitCast(AnyMCValue, operand_mcv);
+ } else {
+ try self.setRegOrMem(src, block.base.ty, block_mcv, operand_mcv);
+ }
+ }
+ return self.brVoid(src, block);
+ }
+
fn brVoid(self: *Self, src: usize, block: *ir.Inst.Block) !MCValue {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block.codegen.relocs.ensureCapacity(self.gpa, block.codegen.relocs.items.len + 1);
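The new shared br helper above centralizes the result-location handoff that genBr previously did inline: the first branch into a block claims its operand as the block's result location, and every later branch copies its operand into that already-chosen location before jumping. Below is a minimal stand-alone sketch of that flow, using hypothetical toy MCValue/Block types rather than the real ones in src/codegen.zig:

const std = @import("std");

// Hypothetical toy types illustrating the result-location handoff in `br`;
// the real MCValue and Block in src/codegen.zig carry much more state.
const MCValue = union(enum) {
    none,
    register: u8,
    stack_offset: u32,
};

const Block = struct {
    // Result location shared by every branch that targets this block.
    mcv: MCValue = .none,
};

// Mirrors the shape of the new `br` helper: the first branch into the block
// claims its operand as the block's result location; later branches copy
// their operand into that already-chosen location.
fn br(block: *Block, operand: MCValue) void {
    if (block.mcv == .none) {
        block.mcv = operand;
    } else {
        // stand-in for the real setRegOrMem copy; only logged in this sketch
        std.debug.print("copy {any} into {any}\n", .{ operand, block.mcv });
    }
    // the real code then emits the jump itself via brVoid
}

pub fn main() void {
    var block = Block{};
    br(&block, .{ .register = 3 });      // first branch: block result now lives in r3
    br(&block, .{ .stack_offset = 16 }); // later branch: copy into r3 before jumping
    std.debug.print("block result location: {any}\n", .{block.mcv});
}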