author     Loris Cro <kappaloris@gmail.com>  2022-08-16 16:52:26 +0200
committer  GitHub <noreply@github.com>       2022-08-16 16:52:26 +0200
commit     7f7d58ee895d0e29850ed1823ba91e672edc03a6 (patch)
tree       7bd7515a24e5eafa8c4a95332908e6e5e41ffa47 /src
parent     5929da37a117dfe67983155b39d4ee39e11f7ebc (diff)
parent     b3922289be1ffaf194b55face332892280981356 (diff)
Merge branch 'master' into autodoc-links
Diffstat (limited to 'src')
-rw-r--r--  src/Air.zig | 5
-rw-r--r--  src/AstGen.zig | 208
-rw-r--r--  src/Autodoc.zig | 24
-rw-r--r--  src/Compilation.zig | 4
-rw-r--r--  src/Liveness.zig | 2
-rw-r--r--  src/Module.zig | 64
-rw-r--r--  src/Sema.zig | 787
-rw-r--r--  src/Zir.zig | 55
-rw-r--r--  src/arch/aarch64/CodeGen.zig | 1
-rw-r--r--  src/arch/arm/CodeGen.zig | 195
-rw-r--r--  src/arch/arm/Emit.zig | 57
-rw-r--r--  src/arch/riscv64/CodeGen.zig | 1
-rw-r--r--  src/arch/sparc64/CodeGen.zig | 1
-rw-r--r--  src/arch/wasm/CodeGen.zig | 744
-rw-r--r--  src/arch/x86_64/CodeGen.zig | 49
-rw-r--r--  src/clang_options_data.zig | 13
-rw-r--r--  src/codegen/c.zig | 1
-rw-r--r--  src/codegen/llvm.zig | 198
-rw-r--r--  src/codegen/llvm/bindings.zig | 6
-rw-r--r--  src/link.zig | 1
-rw-r--r--  src/link/Dwarf.zig | 13
-rw-r--r--  src/link/MachO.zig | 6
-rw-r--r--  src/link/MachO/Atom.zig | 2
-rw-r--r--  src/link/MachO/DebugSymbols.zig | 16
-rw-r--r--  src/link/MachO/Object.zig | 25
-rw-r--r--  src/main.zig | 8
-rw-r--r--  src/print_air.zig | 1
-rw-r--r--  src/print_zir.zig | 80
-rw-r--r--  src/stage1/all_types.hpp | 1
-rw-r--r--  src/stage1/analyze.cpp | 6
-rw-r--r--  src/stage1/ir.cpp | 40
-rw-r--r--  src/stage1/parser.cpp | 11
-rw-r--r--  src/test.zig | 48
-rw-r--r--  src/type.zig | 69
-rw-r--r--  src/value.zig | 48
35 files changed, 1911 insertions(+), 879 deletions(-)
diff --git a/src/Air.zig b/src/Air.zig
index c5734278d3..e08993bbed 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -673,6 +673,10 @@ pub const Inst = struct {
/// Uses the `un_op` field.
error_name,
+ /// Returns true if error set has error with value.
+ /// Uses the `ty_op` field.
+ error_set_has_value,
+
/// Constructs a vector, tuple, struct, or array value out of runtime-known elements.
/// Some of the elements may be comptime-known.
/// Uses the `ty_pl` field, payload is index of an array of elements, each of which
@@ -1062,6 +1066,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.is_err_ptr,
.is_non_err_ptr,
.is_named_enum_value,
+ .error_set_has_value,
=> return Type.bool,
.const_ty => return Type.type,
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 9c8252da55..1151ed60da 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -152,6 +152,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
0,
tree.containerDeclRoot(),
.Auto,
+ 0,
)) |struct_decl_ref| {
assert(refToIndex(struct_decl_ref).? == 0);
} else |err| switch (err) {
@@ -2454,7 +2455,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -2501,7 +2501,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.closure_get,
.array_base_ptr,
.field_base_ptr,
- .param_type,
.ret_ptr,
.ret_type,
.@"try",
@@ -3071,6 +3070,19 @@ fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void {
const line = astgen.source_line - gz.decl_line;
const column = astgen.source_column;
+ if (gz.instructions.items.len > 0) {
+ const last = gz.instructions.items[gz.instructions.items.len - 1];
+ const zir_tags = astgen.instructions.items(.tag);
+ if (zir_tags[last] == .dbg_stmt) {
+ const zir_datas = astgen.instructions.items(.data);
+ zir_datas[last].dbg_stmt = .{
+ .line = line,
+ .column = column,
+ };
+ return;
+ }
+ }
+
_ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
.dbg_stmt = .{
.line = line,
@@ -4212,15 +4224,18 @@ fn structDeclInner(
node: Ast.Node.Index,
container_decl: Ast.full.ContainerDecl,
layout: std.builtin.Type.ContainerLayout,
+ backing_int_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const decl_inst = try gz.reserveInstructionIndex();
- if (container_decl.ast.members.len == 0) {
+ if (container_decl.ast.members.len == 0 and backing_int_node == 0) {
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.fields_len = 0,
.decls_len = 0,
+ .backing_int_ref = .none,
+ .backing_int_body_len = 0,
.known_non_opv = false,
.known_comptime_only = false,
});
@@ -4255,6 +4270,35 @@ fn structDeclInner(
};
defer block_scope.unstack();
+ const scratch_top = astgen.scratch.items.len;
+ defer astgen.scratch.items.len = scratch_top;
+
+ var backing_int_body_len: usize = 0;
+ const backing_int_ref: Zir.Inst.Ref = blk: {
+ if (backing_int_node != 0) {
+ if (layout != .Packed) {
+ return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{});
+ } else {
+ const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node);
+ if (!block_scope.isEmpty()) {
+ if (!block_scope.endsWithNoReturn()) {
+ _ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref);
+ }
+
+ const body = block_scope.instructionsSlice();
+ const old_scratch_len = astgen.scratch.items.len;
+ try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
+ appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+ backing_int_body_len = astgen.scratch.items.len - old_scratch_len;
+ block_scope.instructions.items.len = block_scope.instructions_top;
+ }
+ break :blk backing_int_ref;
+ }
+ } else {
+ break :blk .none;
+ }
+ };
+
const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
const field_count = @intCast(u32, container_decl.ast.members.len - decl_count);
@@ -4279,7 +4323,7 @@ fn structDeclInner(
var known_non_opv = false;
var known_comptime_only = false;
for (container_decl.ast.members) |member_node| {
- const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
+ const member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
};
@@ -4367,6 +4411,8 @@ fn structDeclInner(
.layout = layout,
.fields_len = field_count,
.decls_len = decl_count,
+ .backing_int_ref = backing_int_ref,
+ .backing_int_body_len = @intCast(u32, backing_int_body_len),
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
});
@@ -4375,7 +4421,9 @@ fn structDeclInner(
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
const bodies_slice = astgen.scratch.items[bodies_start..];
- try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + fields_slice.len + bodies_slice.len);
+ try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len +
+ decls_slice.len + fields_slice.len + bodies_slice.len);
+ astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
astgen.extra.appendSliceAssumeCapacity(bodies_slice);
@@ -4446,7 +4494,7 @@ fn unionDeclInner(
defer wip_members.deinit();
for (members) |member_node| {
- const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
+ const member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
};
@@ -4571,9 +4619,7 @@ fn containerDecl(
else => unreachable,
} else std.builtin.Type.ContainerLayout.Auto;
- assert(container_decl.ast.arg == 0);
-
- const result = try structDeclInner(gz, scope, node, container_decl, layout);
+ const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg);
return rvalue(gz, rl, result, node);
},
.keyword_union => {
@@ -4733,7 +4779,7 @@ fn containerDecl(
for (container_decl.ast.members) |member_node| {
if (member_node == counts.nonexhaustive_node)
continue;
- const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
+ const member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
};
@@ -4811,13 +4857,26 @@ fn containerDecl(
};
defer namespace.deinit(gpa);
+ astgen.advanceSourceCursorToNode(node);
+ var block_scope: GenZir = .{
+ .parent = &namespace.base,
+ .decl_node_index = node,
+ .decl_line = astgen.source_line,
+ .astgen = astgen,
+ .force_comptime = true,
+ .in_defer = false,
+ .instructions = gz.instructions,
+ .instructions_top = gz.instructions.items.len,
+ };
+ defer block_scope.unstack();
+
const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, 0, 0, 0);
defer wip_members.deinit();
for (container_decl.ast.members) |member_node| {
- const res = try containerMember(gz, &namespace.base, &wip_members, member_node);
+ const res = try containerMember(&block_scope, &namespace.base, &wip_members, member_node);
if (res == .field) {
return astgen.failNode(member_node, "opaque types cannot have fields", .{});
}
@@ -5038,6 +5097,16 @@ fn tryExpr(
if (parent_gz.in_defer) return astgen.failNode(node, "'try' not allowed inside defer expression", .{});
+ // Ensure debug line/column information is emitted for this try expression.
+ // Then we will save the line/column so that we can emit another one that goes
+ // "backwards" because we want to evaluate the operand, but then put the debug
+ // info back at the try keyword for error return tracing.
+ if (!parent_gz.force_comptime) {
+ try emitDbgNode(parent_gz, node);
+ }
+ const try_line = astgen.source_line - parent_gz.decl_line;
+ const try_column = astgen.source_column;
+
const operand_rl: ResultLoc = switch (rl) {
.ref => .ref,
else => .none,
@@ -5067,6 +5136,7 @@ fn tryExpr(
};
const err_code = try else_scope.addUnNode(err_tag, operand, node);
try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code });
+ try emitDbgStmt(&else_scope, try_line, try_column);
_ = try else_scope.addUnNode(.ret_node, err_code, node);
try else_scope.setTryBody(try_inst, operand);
@@ -6573,6 +6643,16 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
if (gz.in_defer) return astgen.failNode(node, "cannot return from defer expression", .{});
+ // Ensure debug line/column information is emitted for this return expression.
+ // Then we will save the line/column so that we can emit another one that goes
+ // "backwards" because we want to evaluate the operand, but then put the debug
+ // info back at the return keyword for error return tracing.
+ if (!gz.force_comptime) {
+ try emitDbgNode(gz, node);
+ }
+ const ret_line = astgen.source_line - gz.decl_line;
+ const ret_column = astgen.source_column;
+
const defer_outer = &astgen.fn_block.?.base;
const operand_node = node_datas[node].lhs;
@@ -6591,11 +6671,13 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
const defer_counts = countDefers(astgen, defer_outer, scope);
if (!defer_counts.need_err_code) {
try genDefers(gz, defer_outer, scope, .both_sans_err);
+ try emitDbgStmt(gz, ret_line, ret_column);
_ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token);
return Zir.Inst.Ref.unreachable_value;
}
const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token);
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
+ try emitDbgStmt(gz, ret_line, ret_column);
_ = try gz.addUnNode(.ret_node, err_code, node);
return Zir.Inst.Ref.unreachable_value;
}
@@ -6614,6 +6696,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
.never => {
// Returning a value that cannot be an error; skip error defers.
try genDefers(gz, defer_outer, scope, .normal_only);
+ try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
},
@@ -6621,6 +6704,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
// Value is always an error. Emit both error defers and regular defers.
const err_code = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
+ try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
},
@@ -6629,6 +6713,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
if (!defer_counts.have_err) {
// Only regular defers; no branch needed.
try genDefers(gz, defer_outer, scope, .normal_only);
+ try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
}
@@ -6642,6 +6727,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
defer then_scope.unstack();
try genDefers(&then_scope, defer_outer, scope, .normal_only);
+ try emitDbgStmt(&then_scope, ret_line, ret_column);
try then_scope.addRet(rl, operand, node);
var else_scope = gz.makeSubBlock(scope);
@@ -6651,6 +6737,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
.both = try else_scope.addUnNode(.err_union_code, result, node),
};
try genDefers(&else_scope, defer_outer, scope, which_ones);
+ try emitDbgStmt(&else_scope, ret_line, ret_column);
try else_scope.addRet(rl, operand, node);
try setCondBrPayload(condbr, is_non_err, &then_scope, 0, &else_scope, 0);
@@ -7553,7 +7640,6 @@ fn builtinCall(
.trunc => return simpleUnOp(gz, scope, rl, node, .none, params[0], .trunc),
.round => return simpleUnOp(gz, scope, rl, node, .none, params[0], .round),
.tag_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .tag_name),
- .Type => return simpleUnOp(gz, scope, rl, node, .{ .coerced_ty = .type_info_type }, params[0], .reify),
.type_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .type_name),
.Frame => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_type),
.frame_size => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_size),
@@ -7568,6 +7654,31 @@ fn builtinCall(
.truncate => return typeCast(gz, scope, rl, node, params[0], params[1], .truncate),
// zig fmt: on
+ .Type => {
+ const operand = try expr(gz, scope, .{ .coerced_ty = .type_info_type }, params[0]);
+
+ const gpa = gz.astgen.gpa;
+
+ try gz.instructions.ensureUnusedCapacity(gpa, 1);
+ try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
+
+ const payload_index = try gz.astgen.addExtra(Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = operand,
+ });
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+ gz.astgen.instructions.appendAssumeCapacity(.{
+ .tag = .extended,
+ .data = .{ .extended = .{
+ .opcode = .reify,
+ .small = @enumToInt(gz.anon_name_strategy),
+ .operand = payload_index,
+ } },
+ });
+ gz.instructions.appendAssumeCapacity(new_index);
+ const result = indexToRef(new_index);
+ return rvalue(gz, rl, result, node);
+ },
.panic => {
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], if (gz.force_comptime) .panic_comptime else .panic);
@@ -8152,6 +8263,33 @@ fn callExpr(
assert(callee != .none);
assert(node != 0);
+ const call_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+ const call_inst = Zir.indexToRef(call_index);
+ try gz.astgen.instructions.append(astgen.gpa, undefined);
+ try gz.instructions.append(astgen.gpa, call_index);
+
+ const scratch_top = astgen.scratch.items.len;
+ defer astgen.scratch.items.len = scratch_top;
+
+ var scratch_index = scratch_top;
+ try astgen.scratch.resize(astgen.gpa, scratch_top + call.ast.params.len);
+
+ for (call.ast.params) |param_node| {
+ var arg_block = gz.makeSubBlock(scope);
+ defer arg_block.unstack();
+
+ // `call_inst` is reused to provide the param type.
+ const arg_ref = try expr(&arg_block, &arg_block.base, .{ .coerced_ty = call_inst }, param_node);
+ _ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
+
+ const body = arg_block.instructionsSlice();
+ try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body));
+ appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+
+ astgen.scratch.items[scratch_index] = @intCast(u32, astgen.scratch.items.len - scratch_top);
+ scratch_index += 1;
+ }
+
const payload_index = try addExtra(astgen, Zir.Inst.Call{
.callee = callee,
.flags = .{
@@ -8159,22 +8297,16 @@ fn callExpr(
.args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len),
},
});
- var extra_index = try reserveExtra(astgen, call.ast.params.len);
-
- for (call.ast.params) |param_node, i| {
- const param_type = try gz.add(.{
- .tag = .param_type,
- .data = .{ .param_type = .{
- .callee = callee,
- .param_index = @intCast(u32, i),
- } },
- });
- const arg_ref = try expr(gz, scope, .{ .coerced_ty = param_type }, param_node);
- astgen.extra.items[extra_index] = @enumToInt(arg_ref);
- extra_index += 1;
+ if (call.ast.params.len != 0) {
+ try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]);
}
-
- const call_inst = try gz.addPlNodePayloadIndex(.call, node, payload_index);
+ gz.astgen.instructions.set(call_index, .{
+ .tag = .call,
+ .data = .{ .pl_node = .{
+ .src_node = gz.nodeIndexToRelative(node),
+ .payload_index = payload_index,
+ } },
+ });
return rvalue(gz, rl, call_inst, node); // TODO function call with result location
}
@@ -11158,6 +11290,8 @@ const GenZir = struct {
src_node: Ast.Node.Index,
fields_len: u32,
decls_len: u32,
+ backing_int_ref: Zir.Inst.Ref,
+ backing_int_body_len: u32,
layout: std.builtin.Type.ContainerLayout,
known_non_opv: bool,
known_comptime_only: bool,
@@ -11165,7 +11299,7 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;
- try astgen.extra.ensureUnusedCapacity(gpa, 4);
+ try astgen.extra.ensureUnusedCapacity(gpa, 6);
const payload_index = @intCast(u32, astgen.extra.items.len);
if (args.src_node != 0) {
@@ -11178,6 +11312,12 @@ const GenZir = struct {
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
+ if (args.backing_int_ref != .none) {
+ astgen.extra.appendAssumeCapacity(args.backing_int_body_len);
+ if (args.backing_int_body_len == 0) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(args.backing_int_ref));
+ }
+ }
astgen.instructions.set(inst, .{
.tag = .extended,
.data = .{ .extended = .{
@@ -11186,6 +11326,7 @@ const GenZir = struct {
.has_src_node = args.src_node != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
+ .has_backing_int = args.backing_int_ref != .none,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.name_strategy = gz.anon_name_strategy,
@@ -11667,3 +11808,14 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 {
}
return @intCast(u32, count);
}
+
+fn emitDbgStmt(gz: *GenZir, line: u32, column: u32) !void {
+ if (gz.force_comptime) return;
+
+ _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
+ .dbg_stmt = .{
+ .line = line,
+ .column = column,
+ },
+ } });
+}
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 5e0848b60f..d90ebc3de8 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1220,7 +1220,6 @@ fn walkInstruction(
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -2079,14 +2078,19 @@ fn walkInstruction(
const args_len = extra.data.flags.args_len;
var args = try self.arena.alloc(DocData.Expr, args_len);
- const arg_refs = file.zir.refSlice(extra.end, args_len);
- for (arg_refs) |ref, idx| {
+ const body = file.zir.extra[extra.end..];
+
+ var i: usize = 0;
+ while (i < args_len) : (i += 1) {
+ const arg_end = file.zir.extra[extra.end + i];
+ const break_index = body[arg_end - 1];
+ const ref = data[break_index].@"break".operand;
// TODO: consider toggling need_type to true if we ever want
// to show discrepancies between the types of provided
// arguments and the types declared in the function
// signature for its parameters.
const wr = try self.walkRef(file, parent_scope, ref, false);
- args[idx] = wr.expr;
+ args[i] = wr.expr;
}
const cte_slot_index = self.comptime_exprs.items.len;
@@ -2532,6 +2536,17 @@ fn walkInstruction(
break :blk decls_len;
} else 0;
+ // TODO: Expose explicit backing integer types in some way.
+ if (small.has_backing_int) {
+ const backing_int_body_len = file.zir.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
@@ -2605,6 +2620,7 @@ fn walkInstruction(
},
.error_to_int,
.int_to_error,
+ .reify,
=> {
const extra = file.zir.extraData(Zir.Inst.UnNode, extended.operand).data;
const bin_index = self.exprs.items.len;
diff --git a/src/Compilation.zig b/src/Compilation.zig
index d3816e542c..af39154a3f 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1041,6 +1041,10 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
}
}
+ // If LLVM does not support the target, then we can't use it.
+ if (!target_util.hasLlvmSupport(options.target, ofmt))
+ break :blk false;
+
break :blk build_options.is_stage1;
};
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 748016d584..5a4bd2265e 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -267,6 +267,7 @@ pub fn categorizeOperand(
.byte_swap,
.bit_reverse,
.splat,
+ .error_set_has_value,
=> {
const o = air_datas[inst].ty_op;
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -842,6 +843,7 @@ fn analyzeInst(
.byte_swap,
.bit_reverse,
.splat,
+ .error_set_has_value,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
diff --git a/src/Module.zig b/src/Module.zig
index ab394af0ad..995fdda7ea 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -84,7 +84,6 @@ string_literal_bytes: std.ArrayListUnmanaged(u8) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
-/// TODO: remove functions from this set when they are destroyed.
monomorphed_funcs: MonomorphedFuncsSet = .{},
/// The set of all comptime function calls that have been cached so that future calls
/// with the same parameters will get the same return value.
@@ -92,7 +91,6 @@ memoized_calls: MemoizedCallSet = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
-/// TODO: remove functions from this set when they are destroyed.
align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
/// We optimize memory usage for a compilation with no compile errors by storing the
@@ -560,6 +558,8 @@ pub const Decl = struct {
gpa.destroy(extern_fn);
}
if (decl.getFunction()) |func| {
+ _ = mod.align_stack_fns.remove(func);
+ _ = mod.monomorphed_funcs.remove(func);
func.deinit(gpa);
gpa.destroy(func);
}
@@ -853,8 +853,6 @@ pub const EmitH = struct {
pub const ErrorSet = struct {
/// The Decl that corresponds to the error set itself.
owner_decl: Decl.Index,
- /// Offset from Decl node index, points to the error set AST node.
- node_offset: i32,
/// The string bytes are stored in the owner Decl arena.
/// These must be in sorted order. See sortNames.
names: NameMap,
@@ -866,7 +864,7 @@ pub const ErrorSet = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -893,12 +891,15 @@ pub const Struct = struct {
namespace: Namespace,
/// The Decl that corresponds to the struct itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the struct AST node.
- node_offset: i32,
/// Index of the struct_decl ZIR instruction.
zir_index: Zir.Inst.Index,
layout: std.builtin.Type.ContainerLayout,
+ /// If the layout is not packed, this is the noreturn type.
+ /// If the layout is packed, this is the backing integer type of the packed struct.
+ /// Whether zig chooses this type or the user specifies it, it is stored here.
+ /// This will be set to the noreturn type until status is `have_layout`.
+ backing_int_ty: Type = Type.initTag(.noreturn),
status: enum {
none,
field_types_wip,
@@ -953,7 +954,7 @@ pub const Struct = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(s.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -968,7 +969,7 @@ pub const Struct = struct {
});
return s.srcLoc(mod);
};
- const node = owner_decl.relativeToNodeIndex(s.node_offset);
+ const node = owner_decl.relativeToNodeIndex(0);
const node_tags = tree.nodes.items(.tag);
switch (node_tags[node]) {
.container_decl,
@@ -1029,7 +1030,7 @@ pub const Struct = struct {
pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
assert(s.layout == .Packed);
- assert(s.haveFieldTypes());
+ assert(s.haveLayout());
var bit_sum: u64 = 0;
for (s.fields.values()) |field, i| {
if (i == index) {
@@ -1037,19 +1038,7 @@ pub const Struct = struct {
}
bit_sum += field.ty.bitSize(target);
}
- return @intCast(u16, bit_sum);
- }
-
- pub fn packedIntegerBits(s: Struct, target: Target) u16 {
- return s.packedFieldBitOffset(target, s.fields.count());
- }
-
- pub fn packedIntegerType(s: Struct, target: Target, buf: *Type.Payload.Bits) Type {
- buf.* = .{
- .base = .{ .tag = .int_unsigned },
- .data = s.packedIntegerBits(target),
- };
- return Type.initPayload(&buf.base);
+ unreachable; // index out of bounds
}
};
@@ -1060,8 +1049,6 @@ pub const Struct = struct {
pub const EnumSimple = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// Set of field names in declaration order.
fields: NameMap,
@@ -1072,7 +1059,7 @@ pub const EnumSimple = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
};
@@ -1083,8 +1070,6 @@ pub const EnumSimple = struct {
pub const EnumNumbered = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
@@ -1103,7 +1088,7 @@ pub const EnumNumbered = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
};
@@ -1113,8 +1098,6 @@ pub const EnumNumbered = struct {
pub const EnumFull = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
@@ -1137,7 +1120,7 @@ pub const EnumFull = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
};
@@ -1155,8 +1138,6 @@ pub const Union = struct {
namespace: Namespace,
/// The Decl that corresponds to the union itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the union decl AST node.
- node_offset: i32,
/// Index of the union_decl ZIR instruction.
zir_index: Zir.Inst.Index,
@@ -1203,7 +1184,7 @@ pub const Union = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -1218,7 +1199,7 @@ pub const Union = struct {
});
return u.srcLoc(mod);
};
- const node = owner_decl.relativeToNodeIndex(u.node_offset);
+ const node = owner_decl.relativeToNodeIndex(0);
const node_tags = tree.nodes.items(.tag);
var buf: [2]Ast.Node.Index = undefined;
switch (node_tags[node]) {
@@ -1410,8 +1391,6 @@ pub const Union = struct {
pub const Opaque = struct {
/// The Decl that corresponds to the opaque itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the opaque decl AST node.
- node_offset: i32,
/// Represents the declarations inside this opaque.
namespace: Namespace,
@@ -1420,7 +1399,7 @@ pub const Opaque = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -4115,6 +4094,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
mod.deleteDeclExports(decl_index);
+
+ // Similarly, `@setAlignStack` invocations will be re-discovered.
+ if (decl.getFunction()) |func| {
+ _ = mod.align_stack_fns.remove(func);
+ }
+
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
for (decl.dependencies.keys()) |dep_index| {
const dep = mod.declPtr(dep_index);
@@ -4337,7 +4322,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
struct_obj.* = .{
.owner_decl = undefined, // set below
.fields = .{},
- .node_offset = 0, // it's the struct for the root file
.zir_index = undefined, // set below
.layout = .Auto,
.status = .none,
diff --git a/src/Sema.zig b/src/Sema.zig
index 2f3c931e16..879ecb4e2f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -78,6 +78,7 @@ post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
err: ?*Module.ErrorMsg = null,
const std = @import("std");
+const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -772,7 +773,6 @@ fn analyzeBodyInner(
.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
.optional_type => try sema.zirOptionalType(block, inst),
- .param_type => try sema.zirParamType(block, inst),
.ptr_type => try sema.zirPtrType(block, inst),
.overflow_arithmetic_ptr => try sema.zirOverflowArithmeticPtr(block, inst),
.ref => try sema.zirRef(block, inst),
@@ -816,7 +816,6 @@ fn analyzeBodyInner(
.embed_file => try sema.zirEmbedFile(block, inst),
.error_name => try sema.zirErrorName(block, inst),
.tag_name => try sema.zirTagName(block, inst),
- .reify => try sema.zirReify(block, inst),
.type_name => try sema.zirTypeName(block, inst),
.frame_type => try sema.zirFrameType(block, inst),
.frame_size => try sema.zirFrameSize(block, inst),
@@ -951,6 +950,7 @@ fn analyzeBodyInner(
.select => try sema.zirSelect( block, extended),
.error_to_int => try sema.zirErrorToInt( block, extended),
.int_to_error => try sema.zirIntToError( block, extended),
+ .reify => try sema.zirReify( block, extended, inst),
// zig fmt: on
.fence => {
try sema.zirFence(block, extended);
@@ -1818,10 +1818,10 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS
const tree = try sema.getAstTree(block);
const decl = sema.mod.declPtr(decl_index);
- const field_src = enumFieldSrcLoc(decl, tree.*, container_ty.getNodeOffset(), field_index);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_index);
const default_value_src: LazySrcLoc = .{ .node_offset_field_default = field_src.node_offset.x };
- try sema.errNote(block, default_value_src, msg, "default value set here", .{});
+ try sema.mod.errNoteNonLazy(default_value_src.toSrcLoc(decl), msg, "default value set here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -1866,7 +1866,7 @@ fn addFieldErrNote(
const decl_index = container_ty.getOwnerDecl();
const decl = mod.declPtr(decl_index);
const tree = try sema.getAstTree(block);
- const field_src = enumFieldSrcLoc(decl, tree.*, container_ty.getNodeOffset(), field_index);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_index);
try mod.errNoteNonLazy(field_src.toSrcLoc(decl), parent, format, args);
}
@@ -1906,8 +1906,6 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
}
const mod = sema.mod;
- sema.err = err_msg;
-
{
errdefer err_msg.destroy(mod.gpa);
if (err_msg.src_loc.lazy == .unneeded) {
@@ -1925,8 +1923,10 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
if (gop.found_existing) {
// If there are multiple errors for the same Decl, prefer the first one added.
+ sema.err = null;
err_msg.destroy(mod.gpa);
} else {
+ sema.err = err_msg;
gop.value_ptr.* = err_msg;
}
return error.AnalysisFail;
@@ -2239,6 +2239,16 @@ pub fn analyzeStructDecl(
break :blk decls_len;
} else 0;
+ if (small.has_backing_int) {
+ const backing_int_body_len = sema.code.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
_ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl);
}
@@ -2262,7 +2272,7 @@ fn zirStructDecl(
const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = struct_val,
}, small.name_strategy, "struct", inst);
@@ -2272,7 +2282,6 @@ fn zirStructDecl(
struct_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = small.layout,
.status = .none,
@@ -2294,6 +2303,7 @@ fn zirStructDecl(
fn createAnonymousDeclTypeNamed(
sema: *Sema,
block: *Block,
+ src: LazySrcLoc,
typed_value: TypedValue,
name_strategy: Zir.Inst.NameStrategy,
anon_prefix: []const u8,
@@ -2303,7 +2313,8 @@ fn createAnonymousDeclTypeNamed(
const namespace = block.namespace;
const src_scope = block.wip_capture_scope;
const src_decl = mod.declPtr(block.src_decl);
- const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
+ const src_node = src_decl.relativeToNodeIndex(src.node_offset.x);
+ const new_decl_index = try mod.allocateNewDecl(namespace, src_node, src_scope);
errdefer mod.destroyDecl(new_decl_index);
switch (name_strategy) {
@@ -2378,7 +2389,7 @@ fn createAnonymousDeclTypeNamed(
},
else => {},
};
- return sema.createAnonymousDeclTypeNamed(block, typed_value, .anon, anon_prefix, null);
+ return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
},
}
}
@@ -2442,7 +2453,7 @@ fn zirEnumDecl(
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = enum_val,
}, small.name_strategy, "enum", inst);
@@ -2456,7 +2467,6 @@ fn zirEnumDecl(
.tag_ty_inferred = true,
.fields = .{},
.values = .{},
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = enum_ty,
@@ -2684,7 +2694,7 @@ fn zirUnionDecl(
const union_ty = Type.initPayload(&union_payload.base);
const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
const mod = sema.mod;
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = union_val,
}, small.name_strategy, "union", inst);
@@ -2695,7 +2705,6 @@ fn zirUnionDecl(
.owner_decl = new_decl_index,
.tag_ty = Type.initTag(.@"null"),
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = small.layout,
.status = .none,
@@ -2753,7 +2762,7 @@ fn zirOpaqueDecl(
};
const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = opaque_val,
}, small.name_strategy, "opaque", inst);
@@ -2763,7 +2772,6 @@ fn zirOpaqueDecl(
opaque_obj.* = .{
.owner_decl = new_decl_index,
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = opaque_ty,
@@ -2802,7 +2810,7 @@ fn zirErrorSetDecl(
const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set);
const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty);
const mod = sema.mod;
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = error_set_val,
}, name_strategy, "error", inst);
@@ -2827,7 +2835,6 @@ fn zirErrorSetDecl(
error_set.* = .{
.owner_decl = new_decl_index,
- .node_offset = inst_data.src_node,
.names = names,
};
try new_decl.finalizeNewArena(&new_decl_arena);
@@ -4444,43 +4451,6 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
return sema.storePtr2(block, src, ptr, src, operand, src, if (is_ret) .ret_ptr else .store);
}
-fn zirParamType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const callee_src = sema.src;
-
- const inst_data = sema.code.instructions.items(.data)[inst].param_type;
- const callee = try sema.resolveInst(inst_data.callee);
- const callee_ty = sema.typeOf(callee);
- var param_index = inst_data.param_index;
-
- const fn_ty = if (callee_ty.tag() == .bound_fn) fn_ty: {
- const bound_fn_val = try sema.resolveConstValue(block, .unneeded, callee, undefined);
- const bound_fn = bound_fn_val.castTag(.bound_fn).?.data;
- const fn_ty = sema.typeOf(bound_fn.func_inst);
- param_index += 1;
- break :fn_ty fn_ty;
- } else callee_ty;
-
- const fn_info = if (fn_ty.zigTypeTag() == .Pointer)
- fn_ty.childType().fnInfo()
- else
- fn_ty.fnInfo();
-
- if (param_index >= fn_info.param_types.len) {
- if (fn_info.is_var_args) {
- return sema.addType(Type.initTag(.var_args_param));
- }
- // TODO implement begin_call/end_call Zir instructions and check
- // argument count before casting arguments to parameter types.
- return sema.fail(block, callee_src, "wrong number of arguments", .{});
- }
-
- if (fn_info.param_types[param_index].tag() == .generic_poison) {
- return sema.addType(Type.initTag(.var_args_param));
- }
-
- return sema.addType(fn_info.param_types[param_index]);
-}
-
fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -5450,6 +5420,19 @@ fn lookupInNamespace(
return null;
}
+fn funcDeclSrc(sema: *Sema, block: *Block, src: LazySrcLoc, func_inst: Air.Inst.Ref) !?Module.SrcLoc {
+ const func_val = (try sema.resolveMaybeUndefVal(block, src, func_inst)) orelse return null;
+ if (func_val.isUndef()) return null;
+ const owner_decl_index = switch (func_val.tag()) {
+ .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl,
+ .function => func_val.castTag(.function).?.data.owner_decl,
+ .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
+ else => return null,
+ };
+ const owner_decl = sema.mod.declPtr(owner_decl_index);
+ return owner_decl.srcLoc();
+}
+
fn zirCall(
sema: *Sema,
block: *Block,
@@ -5462,13 +5445,14 @@ fn zirCall(
const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index);
- const args = sema.code.refSlice(extra.end, extra.data.flags.args_len);
+ const args_len = extra.data.flags.args_len;
const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier);
const ensure_result_used = extra.data.flags.ensure_result_used;
var func = try sema.resolveInst(extra.data.callee);
var resolved_args: []Air.Inst.Ref = undefined;
+ var arg_index: u32 = 0;
const func_type = sema.typeOf(func);
@@ -5479,16 +5463,93 @@ fn zirCall(
const bound_func = try sema.resolveValue(block, .unneeded, func, undefined);
const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data;
func = bound_data.func_inst;
- resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1);
- resolved_args[0] = bound_data.arg0_inst;
- for (args) |zir_arg, i| {
- resolved_args[i + 1] = try sema.resolveInst(zir_arg);
- }
+ resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len + 1);
+ resolved_args[arg_index] = bound_data.arg0_inst;
+ arg_index += 1;
} else {
- resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
- for (args) |zir_arg, i| {
- resolved_args[i] = try sema.resolveInst(zir_arg);
+ resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len);
+ }
+ const total_args = args_len + @boolToInt(bound_arg_src != null);
+
+ const callee_ty = sema.typeOf(func);
+ const func_ty = func_ty: {
+ switch (callee_ty.zigTypeTag()) {
+ .Fn => break :func_ty callee_ty,
+ .Pointer => {
+ const ptr_info = callee_ty.ptrInfo().data;
+ if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
+ break :func_ty ptr_info.pointee_type;
+ }
+ },
+ else => {},
+ }
+ return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)});
+ };
+ const func_ty_info = func_ty.fnInfo();
+
+ const fn_params_len = func_ty_info.param_types.len;
+ check_args: {
+ if (func_ty_info.is_var_args) {
+ assert(func_ty_info.cc == .C);
+ if (total_args >= fn_params_len) break :check_args;
+ } else if (fn_params_len == total_args) {
+ break :check_args;
}
+
+ const decl_src = try sema.funcDeclSrc(block, func_src, func);
+ const member_str = if (bound_arg_src != null) "member function " else "";
+ const variadic_str = if (func_ty_info.is_var_args) "at least " else "";
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ func_src,
+ "{s}expected {s}{d} argument(s), found {d}",
+ .{
+ member_str,
+ variadic_str,
+ fn_params_len - @boolToInt(bound_arg_src != null),
+ args_len,
+ },
+ );
+ errdefer msg.destroy(sema.gpa);
+
+ if (decl_src) |some| try sema.mod.errNoteNonLazy(some, msg, "function declared here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ const args_body = sema.code.extra[extra.end..];
+
+ const parent_comptime = block.is_comptime;
+ // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument.
+ var extra_index: usize = 0;
+ var arg_start: u32 = args_len;
+ while (extra_index < args_len) : ({
+ extra_index += 1;
+ arg_index += 1;
+ }) {
+ const arg_end = sema.code.extra[extra.end + extra_index];
+ defer arg_start = arg_end;
+
+ const param_ty = if (arg_index >= fn_params_len or
+ func_ty_info.param_types[arg_index].tag() == .generic_poison)
+ Type.initTag(.var_args_param)
+ else
+ func_ty_info.param_types[arg_index];
+
+ const old_comptime = block.is_comptime;
+ defer block.is_comptime = old_comptime;
+ // Generate args to comptime params in comptime block.
+ block.is_comptime = parent_comptime;
+ if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) {
+ block.is_comptime = true;
+ }
+
+ const param_ty_inst = try sema.addType(param_ty);
+ try sema.inst_map.put(sema.gpa, inst, param_ty_inst);
+
+ resolved_args[arg_index] = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
}
return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src);
@@ -5582,13 +5643,20 @@ fn analyzeCall(
const func_ty_info = func_ty.fnInfo();
const cc = func_ty_info.cc;
if (cc == .Naked) {
- // TODO add error note: declared here
- return sema.fail(
- block,
- func_src,
- "unable to call function with naked calling convention",
- .{},
- );
+ const decl_src = try sema.funcDeclSrc(block, func_src, func);
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ func_src,
+ "unable to call function with naked calling convention",
+ .{},
+ );
+ errdefer msg.destroy(sema.gpa);
+
+ if (decl_src) |some| try sema.mod.errNoteNonLazy(some, msg, "function declared here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
const fn_params_len = func_ty_info.param_types.len;
if (func_ty_info.is_var_args) {
@@ -5967,7 +6035,18 @@ fn analyzeCall(
else => |e| return e,
};
} else {
- args[i] = uncasted_arg;
+ args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) {
+ error.NeededSourceLocation => {
+ const decl = sema.mod.declPtr(block.src_decl);
+ _ = try sema.coerceVarArgParam(
+ block,
+ uncasted_arg,
+ Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src),
+ );
+ return error.AnalysisFail;
+ },
+ else => |e| return e,
+ };
}
}
@@ -6272,6 +6351,7 @@ fn instantiateGenericCall(
new_decl.is_exported = fn_owner_decl.is_exported;
new_decl.has_align = fn_owner_decl.has_align;
new_decl.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace;
+ new_decl.@"linksection" = fn_owner_decl.@"linksection";
new_decl.@"addrspace" = fn_owner_decl.@"addrspace";
new_decl.zir_decl_index = fn_owner_decl.zir_decl_index;
new_decl.alive = true; // This Decl is called at runtime.
@@ -6724,11 +6804,10 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
- const result_ty = Type.u16;
if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| {
if (val.isUndef()) {
- return sema.addConstUndef(result_ty);
+ return sema.addConstUndef(Type.err_int);
}
switch (val.tag()) {
.@"error" => {
@@ -6737,14 +6816,14 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
.base = .{ .tag = .int_u64 },
.data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value,
};
- return sema.addConstant(result_ty, Value.initPayload(&payload.base));
+ return sema.addConstant(Type.err_int, Value.initPayload(&payload.base));
},
// This is not a valid combination with the type `anyerror`.
.the_only_possible_value => unreachable,
// Assume it's already encoded as an integer.
- else => return sema.addConstant(result_ty, val),
+ else => return sema.addConstant(Type.err_int, val),
}
}
@@ -6753,14 +6832,14 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
if (!op_ty.isAnyError()) {
const names = op_ty.errorSetNames();
switch (names.len) {
- 0 => return sema.addConstant(result_ty, Value.zero),
- 1 => return sema.addIntUnsigned(result_ty, sema.mod.global_error_set.get(names[0]).?),
+ 0 => return sema.addConstant(Type.err_int, Value.zero),
+ 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?),
else => {},
}
}
try sema.requireRuntimeBlock(block, src, operand_src);
- return block.addBitCast(result_ty, operand);
+ return block.addBitCast(Type.err_int, operand);
}
fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
@@ -6771,7 +6850,7 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
- const operand = try sema.coerce(block, Type.u16, uncasted_operand, operand_src);
+ const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);
const target = sema.mod.getTarget();
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
@@ -6788,7 +6867,10 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety()) {
const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand);
- try sema.addSafetyCheck(block, is_lt_len, .invalid_error_code);
+ const zero_val = try sema.addConstant(Type.err_int, Value.zero);
+ const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val);
+ const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero);
+ try sema.addSafetyCheck(block, ok, .invalid_error_code);
}
return block.addInst(.{
.tag = .bitcast,
@@ -10201,16 +10283,14 @@ fn zirShl(
const val = switch (air_tag) {
.shl_exact => val: {
- const shifted = try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target);
+ const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, target);
if (scalar_ty.zigTypeTag() == .ComptimeInt) {
- break :val shifted;
+ break :val shifted.wrapped_result;
}
- const int_info = scalar_ty.intInfo(target);
- const truncated = try shifted.intTrunc(lhs_ty, sema.arena, int_info.signedness, int_info.bits, target);
- if (try sema.compare(block, src, truncated, .eq, shifted, lhs_ty)) {
- break :val shifted;
+ if (shifted.overflowed.compareWithZero(.eq)) {
+ break :val shifted.wrapped_result;
}
- return sema.addConstUndef(lhs_ty);
+ return sema.fail(block, src, "operation caused overflow", .{});
},
.shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
@@ -10363,7 +10443,7 @@ fn zirShr(
// Detect if any ones would be shifted out.
const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, target);
if (!(try truncated.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
- return sema.addConstUndef(lhs_ty);
+ return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
}
}
const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, target);
@@ -11183,7 +11263,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
- const air_tag = if (is_int) Air.Inst.Tag.div_trunc else switch (block.float_mode) {
+ const air_tag = if (is_int) blk: {
+ if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) {
+ return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
+ }
+ break :blk Air.Inst.Tag.div_trunc;
+ } else switch (block.float_mode) {
.Optimized => Air.Inst.Tag.div_float_optimized,
.Strict => Air.Inst.Tag.div_float,
};
@@ -11263,13 +11348,19 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- // TODO: emit compile error if there is a remainder
+ const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target);
+ if (modulus_val.compareWithZero(.neq)) {
+ return sema.fail(block, src, "exact division produced remainder", .{});
+ }
return sema.addConstant(
resolved_type,
try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target),
);
} else {
- // TODO: emit compile error if there is a remainder
+ const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target);
+ if (modulus_val.compareWithZero(.neq)) {
+ return sema.fail(block, src, "exact division produced remainder", .{});
+ }
return sema.addConstant(
resolved_type,
try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
@@ -11756,7 +11847,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
- const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target);
+ const rem_result = try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src);
// If this answer could possibly be different by doing `intMod`,
// we must emit a compile error. Otherwise, it's OK.
if ((try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) != (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) and
@@ -11818,6 +11909,60 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
+fn intRem(
+ sema: *Sema,
+ block: *Block,
+ ty: Type,
+ lhs: Value,
+ lhs_src: LazySrcLoc,
+ rhs: Value,
+ rhs_src: LazySrcLoc,
+) CompileError!Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.intRemScalar(block, lhs.indexVectorlike(i), lhs_src, rhs.indexVectorlike(i), rhs_src);
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.intRemScalar(block, lhs, lhs_src, rhs, rhs_src);
+}
+
+fn intRemScalar(
+ sema: *Sema,
+ block: *Block,
+ lhs: Value,
+ lhs_src: LazySrcLoc,
+ rhs: Value,
+ rhs_src: LazySrcLoc,
+) CompileError!Value {
+ const target = sema.mod.getTarget();
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, lhs_src));
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, rhs_src));
+ const limbs_q = try sema.arena.alloc(
+ math.big.Limb,
+ lhs_bigint.limbs.len,
+ );
+ const limbs_r = try sema.arena.alloc(
+ math.big.Limb,
+ // TODO: consider reworking Sema to re-use Values rather than
+ // always producing new Value objects.
+ rhs_bigint.limbs.len,
+ );
+ const limbs_buffer = try sema.arena.alloc(
+ math.big.Limb,
+ math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+ var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
+ return Value.fromBigInt(sema.arena, result_r.toConst());
+}
+
fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@@ -11982,7 +12127,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target),
+ try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src),
);
}
break :rs lhs_src;
@@ -14162,13 +14307,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace());
- const field_values = try sema.arena.create([4]Value);
+ const backing_integer_val = blk: {
+ if (layout == .Packed) {
+ const struct_obj = struct_ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveLayout());
+ assert(struct_obj.backing_int_ty.isInt());
+ const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
+ break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
+ } else {
+ break :blk Value.initTag(.null_value);
+ }
+ };
+
+ const field_values = try sema.arena.create([5]Value);
field_values.* = .{
// layout: ContainerLayout,
try Value.Tag.enum_field_index.create(
sema.arena,
@enumToInt(layout),
),
+ // backing_integer: ?type,
+ backing_integer_val,
// fields: []const StructField,
fields_val,
// decls: []const Declaration,
@@ -14492,6 +14651,20 @@ fn zirBoolBr(
const rhs_result = try sema.resolveBody(rhs_block, body, inst);
_ = try rhs_block.addBr(block_inst, rhs_result);
+ return finishCondBr(sema, parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
+}
+
+fn finishCondBr(
+ sema: *Sema,
+ parent_block: *Block,
+ child_block: *Block,
+ then_block: *Block,
+ else_block: *Block,
+ cond: Air.Inst.Ref,
+ block_inst: Air.Inst.Index,
+) !Air.Inst.Ref {
+ const gpa = sema.gpa;
+
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
@typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);
@@ -14504,7 +14677,7 @@ fn zirBoolBr(
sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
_ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
- .operand = lhs,
+ .operand = cond,
.payload = cond_br_payload,
} } });
@@ -14874,10 +15047,83 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
return sema.analyzeRet(block, operand, src);
}
+
+ if (sema.wantErrorReturnTracing()) {
+ const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr);
+ return retWithErrTracing(sema, block, src, is_non_err, .ret_load, ret_ptr);
+ }
+
_ = try block.addUnOp(.ret_load, ret_ptr);
return always_noreturn;
}
+fn retWithErrTracing(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ is_non_err: Air.Inst.Ref,
+ ret_tag: Air.Inst.Tag,
+ operand: Air.Inst.Ref,
+) CompileError!Zir.Inst.Index {
+ const need_check = switch (is_non_err) {
+ .bool_true => {
+ _ = try block.addUnOp(ret_tag, operand);
+ return always_noreturn;
+ },
+ .bool_false => false,
+ else => true,
+ };
+ const gpa = sema.gpa;
+ const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
+ const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
+ const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty);
+ const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
+ const return_err_fn = try sema.getBuiltin(block, src, "returnError");
+ const args: [1]Air.Inst.Ref = .{err_return_trace};
+
+ if (!need_check) {
+ _ = try sema.analyzeCall(block, return_err_fn, src, src, .never_inline, false, &args, null);
+ _ = try block.addUnOp(ret_tag, operand);
+ return always_noreturn;
+ }
+
+ var then_block = block.makeSubBlock();
+ defer then_block.instructions.deinit(gpa);
+ _ = try then_block.addUnOp(ret_tag, operand);
+
+ var else_block = block.makeSubBlock();
+ defer else_block.instructions.deinit(gpa);
+ _ = try sema.analyzeCall(&else_block, return_err_fn, src, src, .never_inline, false, &args, null);
+ _ = try else_block.addUnOp(ret_tag, operand);
+
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ then_block.instructions.items.len + else_block.instructions.items.len +
+ @typeInfo(Air.Block).Struct.fields.len + 1);
+
+ const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
+ .then_body_len = @intCast(u32, then_block.instructions.items.len),
+ .else_body_len = @intCast(u32, else_block.instructions.items.len),
+ });
+ sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
+ sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
+
+ _ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
+ .operand = is_non_err,
+ .payload = cond_br_payload,
+ } } });
+
+ return always_noreturn;
+}
+
+fn wantErrorReturnTracing(sema: *Sema) bool {
+ // TODO implement this feature in all the backends and then delete this check.
+ const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm;
+
+ return sema.fn_ret_ty.isError() and
+ sema.mod.comp.bin_file.options.error_return_tracing and
+ backend_supports_error_return_tracing;
+}
+
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion);
@@ -14923,27 +15169,15 @@ fn analyzeRet(
return always_noreturn;
}
- // TODO implement this feature in all the backends and then delete this check.
- const backend_supports_error_return_tracing =
- sema.mod.comp.bin_file.options.use_llvm;
+ try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
- if (sema.fn_ret_ty.isError() and
- sema.mod.comp.bin_file.options.error_return_tracing and
- backend_supports_error_return_tracing)
- ret_err: {
- if (try sema.resolveMaybeUndefVal(block, src, operand)) |ret_val| {
- if (ret_val.tag() != .@"error") break :ret_err;
- }
- const return_err_fn = try sema.getBuiltin(block, src, "returnError");
- const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
- const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
- const ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty);
- const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
- const args: [1]Air.Inst.Ref = .{err_return_trace};
- _ = try sema.analyzeCall(block, return_err_fn, src, src, .never_inline, false, &args, null);
+ if (sema.wantErrorReturnTracing()) {
+ // Avoid adding a frame to the error return trace in case the value is comptime-known
+ // to be not an error.
+ const is_non_err = try sema.analyzeIsNonErr(block, src, operand);
+ return retWithErrTracing(sema, block, src, is_non_err, .ret, operand);
}
- try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
_ = try block.addUnOp(.ret, operand);
return always_noreturn;
}
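Editor's note (function names hypothetical): the comptime check above means a return whose operand is known not to be an error emits a plain ret with no trace bookkeeping, while a comptime-known error return calls returnError unconditionally:

    fn alwaysOk() anyerror!u32 {
        return 42; // operand comptime-known non-error: plain `ret`, no returnError call
    }

    fn alwaysFails() anyerror!u32 {
        return error.Oops; // comptime-known error: returnError is called, then `ret`
    }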
@@ -16023,13 +16257,14 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return block.addUnOp(.tag_name, casted_operand);
}
-fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
+ const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small);
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
const type_info_ty = try sema.resolveBuiltinTypeFields(block, src, "Type");
- const uncasted_operand = try sema.resolveInst(inst_data.operand);
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const uncasted_operand = try sema.resolveInst(extra.operand);
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime known");
const union_val = val.cast(Value.Payload.Union).?.data;
@@ -16109,7 +16344,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
if (!try sema.intFitsInType(block, src, alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const abi_align = @intCast(u29, alignment_val.toUnsignedInt(target));
+ const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema.kit(block, src))).?);
var buffer: Value.ToTypeBuffer = undefined;
const unresolved_elem_ty = child_val.toType(&buffer);
@@ -16274,22 +16509,31 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const struct_val = union_val.val.castTag(.aggregate).?.data;
// layout: containerlayout,
const layout_val = struct_val[0];
+ // backing_int: ?type,
+ const backing_int_val = struct_val[1];
// fields: []const enumfield,
- const fields_val = struct_val[1];
+ const fields_val = struct_val[2];
// decls: []const declaration,
- const decls_val = struct_val[2];
+ const decls_val = struct_val[3];
// is_tuple: bool,
- const is_tuple_val = struct_val[3];
+ const is_tuple_val = struct_val[4];
+ assert(struct_val.len == 5);
+
+ const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified structs must have no decls", .{});
}
+ if (layout != .Packed and !backing_int_val.isNull()) {
+ return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
+ }
+
return if (is_tuple_val.toBool())
try sema.reifyTuple(block, src, fields_val)
else
- try sema.reifyStruct(block, inst, src, layout_val, fields_val);
+ try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy);
},
.Enum => {
const struct_val = union_val.val.castTag(.aggregate).?.data;
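Editor's note on the struct reification path above: a hedged round-trip sketch, assuming the reified info is otherwise accepted; the rejected combination mirrors the new error check and is shown commented out:

    const Flags = packed struct(u8) { a: bool, rest: u7 };
    const Clone = @Type(@typeInfo(Flags)); // backing_integer is threaded through reifyStruct

    // Rejected by the new check:
    // error: non-packed struct does not support backing integer type
    // const Bad = @Type(.{ .Struct = .{
    //     .layout = .Auto,
    //     .backing_integer = u8,
    //     .fields = &.{},
    //     .decls = &.{},
    //     .is_tuple = false,
    // } });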
@@ -16335,10 +16579,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = enum_val,
- }, .anon, "enum", null);
+ }, name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
@@ -16349,7 +16593,6 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.tag_ty_inferred = false,
.fields = .{},
.values = .{},
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = enum_ty,
@@ -16432,17 +16675,16 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
};
const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = opaque_val,
- }, .anon, "opaque", null);
+ }, name_strategy, "opaque", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
opaque_obj.* = .{
.owner_decl = new_decl_index,
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = opaque_ty,
@@ -16491,10 +16733,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
};
const union_ty = Type.initPayload(&union_payload.base);
const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = new_union_val,
- }, .anon, "union", null);
+ }, name_strategy, "union", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
@@ -16502,7 +16744,6 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.owner_decl = new_decl_index,
.tag_ty = Type.initTag(.@"null"),
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = layout,
.status = .have_field_types,
@@ -16785,8 +17026,10 @@ fn reifyStruct(
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
- layout_val: Value,
+ layout: std.builtin.Type.ContainerLayout,
+ backing_int_val: Value,
fields_val: Value,
+ name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
@@ -16796,19 +17039,18 @@ fn reifyStruct(
const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
const mod = sema.mod;
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = new_struct_val,
- }, .anon, "struct", null);
+ }, name_strategy, "struct", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
struct_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
- .layout = layout_val.toEnum(std.builtin.Type.ContainerLayout),
+ .layout = layout,
.status = .have_field_types,
.known_non_opv = false,
.namespace = .{
@@ -16874,6 +17116,41 @@ fn reifyStruct(
};
}
+ if (layout == .Packed) {
+ struct_obj.status = .layout_wip;
+
+ for (struct_obj.fields.values()) |field, index| {
+ sema.resolveTypeLayout(block, src, field.ty) catch |err| switch (err) {
+ error.AnalysisFail => {
+ const msg = sema.err orelse return err;
+ try sema.addFieldErrNote(block, struct_ty, index, msg, "while checking this field", .{});
+ return err;
+ },
+ else => return err,
+ };
+ }
+
+ var fields_bit_sum: u64 = 0;
+ for (struct_obj.fields.values()) |field| {
+ fields_bit_sum += field.ty.bitSize(target);
+ }
+
+ if (backing_int_val.optionalValue()) |payload| {
+ var buf: Value.ToTypeBuffer = undefined;
+ const backing_int_ty = payload.toType(&buf);
+ try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
+ struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
+ } else {
+ var buf: Type.Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = @intCast(u16, fields_bit_sum),
+ };
+ struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
+ }
+
+ struct_obj.status = .have_layout;
+ }
+
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
@@ -17095,17 +17372,10 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (block.wantSafety() and !dest_ty.isAnyError()) {
- const err_int_inst = try block.addBitCast(Type.u16, operand);
- // TODO: Output a switch instead of chained OR's.
- var found_match: Air.Inst.Ref = undefined;
- for (dest_ty.errorSetNames()) |dest_err_name, i| {
- const dest_err_int = (try sema.mod.getErrorValue(dest_err_name)).value;
- const dest_err_int_inst = try sema.addIntUnsigned(Type.u16, dest_err_int);
- const next_match = try block.addBinOp(.cmp_eq, dest_err_int_inst, err_int_inst);
- found_match = if (i == 0) next_match else try block.addBinOp(.bool_or, found_match, next_match);
- }
- try sema.addSafetyCheck(block, found_match, .invalid_error_code);
+ if (block.wantSafety() and !dest_ty.isAnyError() and sema.mod.comp.bin_file.options.use_llvm) {
+ const err_int_inst = try block.addBitCast(Type.err_int, operand);
+ const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
+ try sema.addSafetyCheck(block, ok, .invalid_error_code);
}
return block.addBitCast(dest_ty, operand);
}
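Editor's note (names hypothetical): the branch above replaces the chained compares with a single error_set_has_value check, emitted only for the LLVM backend, so a safety-checked build panics when the cast value is not a member of the destination set:

    const SmallSet = error{OnlyThis};

    fn narrow(err: anyerror) SmallSet {
        // With runtime safety enabled (and the LLVM backend, per the check above),
        // a value outside SmallSet trips the invalid_error_code safety panic.
        return @errSetCast(SmallSet, err);
    }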
@@ -17133,6 +17403,15 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
else
operand;
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
+ if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, operand_src);
+ }
+ if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) {
+ return sema.fail(block, operand_src, "null pointer casted to type {}", .{dest_ty.fmt(sema.mod)});
+ }
+ }
+
const dest_elem_ty = dest_ty.elemType2();
try sema.resolveTypeLayout(block, dest_ty_src, dest_elem_ty);
const dest_align = dest_ty.ptrAlignment(target);
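Editor's note: a minimal sketch of the kind of comptime misuse the new guard above rejects; the exact diagnostic is assumed from the code:

    comptime {
        const p: *u32 = undefined;
        // expected: error: use of undefined value here causes undefined behavior
        _ = @ptrCast(*u8, p);
    }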
@@ -23464,7 +23743,10 @@ fn coerceVarArgParam(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const inst_ty = sema.typeOf(inst);
+ if (block.is_typeof) return inst;
+
switch (inst_ty.zigTypeTag()) {
+ // TODO consider casting to c_int/f64 if they fit
.ComptimeInt, .ComptimeFloat => return sema.fail(block, inst_src, "integer and float literals in var args function must be casted", .{}),
else => {},
}
@@ -23846,7 +24128,10 @@ fn beginComptimePtrMutation(
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
const elems = try arena.alloc(Value, array_len_including_sentinel);
- mem.set(Value, elems, repeated_val);
+ if (elems.len > 0) elems[0] = repeated_val;
+ for (elems[1..]) |*elem| {
+ elem.* = try repeated_val.copy(arena);
+ }
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
@@ -25423,6 +25708,27 @@ fn analyzeIsNull(
return block.addUnOp(air_tag, operand);
}
+fn analyzePtrIsNonErrComptimeOnly(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ operand: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+ const ptr_ty = sema.typeOf(operand);
+ assert(ptr_ty.zigTypeTag() == .Pointer);
+ const child_ty = ptr_ty.childType();
+
+ const child_tag = child_ty.zigTypeTag();
+ if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
+ if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false;
+ assert(child_tag == .ErrorUnion);
+
+ _ = block;
+ _ = src;
+
+ return Air.Inst.Ref.none;
+}
+
fn analyzeIsNonErrComptimeOnly(
sema: *Sema,
block: *Block,
@@ -25436,10 +25742,16 @@ fn analyzeIsNonErrComptimeOnly(
assert(ot == .ErrorUnion);
if (Air.refToIndex(operand)) |operand_inst| {
- const air_tags = sema.air_instructions.items(.tag);
- if (air_tags[operand_inst] == .wrap_errunion_payload) {
- return Air.Inst.Ref.bool_true;
+ switch (sema.air_instructions.items(.tag)[operand_inst]) {
+ .wrap_errunion_payload => return Air.Inst.Ref.bool_true,
+ .wrap_errunion_err => return Air.Inst.Ref.bool_false,
+ else => {},
}
+ } else if (operand == .undef) {
+ return sema.addConstUndef(Type.bool);
+ } else {
+ // None of the ref tags can be errors.
+ return Air.Inst.Ref.bool_true;
}
const maybe_operand_val = try sema.resolveMaybeUndefVal(block, src, operand);
@@ -25515,6 +25827,21 @@ fn analyzeIsNonErr(
}
}
+fn analyzePtrIsNonErr(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ operand: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+ const result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand);
+ if (result == .none) {
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addUnOp(.is_non_err_ptr, operand);
+ } else {
+ return result;
+ }
+}
+
fn analyzeSlice(
sema: *Sema,
block: *Block,
@@ -26913,6 +27240,11 @@ fn resolveStructLayout(
else => return err,
};
}
+
+ if (struct_obj.layout == .Packed) {
+ try semaBackingIntType(sema.mod, struct_obj);
+ }
+
struct_obj.status = .have_layout;
// In case of querying the ABI alignment of this struct, we will ask
@@ -26932,6 +27264,109 @@ fn resolveStructLayout(
// otherwise it's a tuple; no need to resolve anything
}
+fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
+ const gpa = mod.gpa;
+ const target = mod.getTarget();
+
+ var fields_bit_sum: u64 = 0;
+ for (struct_obj.fields.values()) |field| {
+ fields_bit_sum += field.ty.bitSize(target);
+ }
+
+ const decl_index = struct_obj.owner_decl;
+ const decl = mod.declPtr(decl_index);
+ var decl_arena = decl.value_arena.?.promote(gpa);
+ defer decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.allocator();
+
+ const zir = struct_obj.namespace.file_scope.zir;
+ const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
+ assert(extended.opcode == .struct_decl);
+ const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+
+ if (small.has_backing_int) {
+ var extra_index: usize = extended.operand;
+ extra_index += @boolToInt(small.has_src_node);
+ extra_index += @boolToInt(small.has_fields_len);
+ extra_index += @boolToInt(small.has_decls_len);
+
+ const backing_int_body_len = zir.extra[extra_index];
+ extra_index += 1;
+
+ var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+ defer analysis_arena.deinit();
+
+ var sema: Sema = .{
+ .mod = mod,
+ .gpa = gpa,
+ .arena = analysis_arena.allocator(),
+ .perm_arena = decl_arena_allocator,
+ .code = zir,
+ .owner_decl = decl,
+ .owner_decl_index = decl_index,
+ .func = null,
+ .fn_ret_ty = Type.void,
+ .owner_func = null,
+ };
+ defer sema.deinit();
+
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
+ defer wip_captures.deinit();
+
+ var block: Block = .{
+ .parent = null,
+ .sema = &sema,
+ .src_decl = decl_index,
+ .namespace = &struct_obj.namespace,
+ .wip_capture_scope = wip_captures.scope,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = true,
+ };
+ defer {
+ assert(block.instructions.items.len == 0);
+ block.params.deinit(gpa);
+ }
+
+ const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
+ const backing_int_ty = blk: {
+ if (backing_int_body_len == 0) {
+ const backing_int_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
+ break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
+ } else {
+ const body = zir.extra[extra_index..][0..backing_int_body_len];
+ const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
+ break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
+ }
+ };
+
+ try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
+ struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator);
+ } else {
+ var buf: Type.Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = @intCast(u16, fields_bit_sum),
+ };
+ struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
+ }
+}
+
+fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
+ const target = sema.mod.getTarget();
+
+ if (!backing_int_ty.isInt()) {
+ return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
+ }
+ if (backing_int_ty.bitSize(target) != fields_bit_sum) {
+ return sema.fail(
+ block,
+ src,
+ "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
+ .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum },
+ );
+ }
+}
+
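Editor's note: for illustration, the checks above are expected to produce errors like the following (messages taken from the format strings in checkBackingIntType):

    // expected: error: backing integer type 'u8' has bit size 8 but the struct fields
    // have a total bit size of 16
    const Mismatch = packed struct(u8) {
        a: u8,
        b: u8,
    };

    // expected: error: expected backing integer type, found 'f32'
    const NotAnInt = packed struct(f32) {
        x: u32,
    };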
fn resolveUnionLayout(
sema: *Sema,
block: *Block,
@@ -27239,7 +27674,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
var extra_index: usize = extended.operand;
- const src = LazySrcLoc.nodeOffset(struct_obj.node_offset);
+ const src = LazySrcLoc.nodeOffset(0);
extra_index += @boolToInt(small.has_src_node);
const fields_len = if (small.has_fields_len) blk: {
@@ -27254,12 +27689,26 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
break :decls_len decls_len;
} else 0;
+ // The backing integer cannot be handled until `resolveStructLayout()`.
+ if (small.has_backing_int) {
+ const backing_int_body_len = zir.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
// Skip over decls.
var decls_it = zir.declIteratorInner(extra_index, decls_len);
while (decls_it.next()) |_| {}
extra_index = decls_it.extra_index;
if (fields_len == 0) {
+ if (struct_obj.layout == .Packed) {
+ try semaBackingIntType(mod, struct_obj);
+ }
struct_obj.status = .have_layout;
return;
}
@@ -27358,12 +27807,12 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (gop.found_existing) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name});
errdefer msg.destroy(gpa);
const prev_field_index = struct_obj.fields.getIndex(field_name).?;
- const prev_field_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, prev_field_index);
+ const prev_field_src = enumFieldSrcLoc(decl, tree.*, 0, prev_field_index);
try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
break :msg msg;
@@ -27420,7 +27869,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (field_ty.zigTypeTag() == .Opaque) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, i);
const msg = try sema.errMsg(&block_scope, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
@@ -27432,7 +27881,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (struct_obj.layout == .Extern and !sema.validateExternType(field.ty, .other)) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const fields_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, i);
+ const fields_src = enumFieldSrcLoc(decl, tree.*, 0, i);
const msg = try sema.errMsg(&block_scope, fields_src, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27445,7 +27894,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
} else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const fields_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, i);
+ const fields_src = enumFieldSrcLoc(decl, tree.*, 0, i);
const msg = try sema.errMsg(&block_scope, fields_src, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27502,7 +27951,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
var extra_index: usize = extended.operand;
- const src = LazySrcLoc.nodeOffset(union_obj.node_offset);
+ const src = LazySrcLoc.nodeOffset(0);
extra_index += @boolToInt(small.has_src_node);
const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
@@ -27726,12 +28175,12 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (gop.found_existing) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name});
errdefer msg.destroy(gpa);
const prev_field_index = union_obj.fields.getIndex(field_name).?;
- const prev_field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, prev_field_index);
+ const prev_field_src = enumFieldSrcLoc(decl, tree.*, 0, prev_field_index);
try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "union declared here", .{});
break :msg msg;
@@ -27744,7 +28193,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (!enum_has_field) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
@@ -27757,7 +28206,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (field_ty.zigTypeTag() == .Opaque) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(sema.gpa);
@@ -27769,7 +28218,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (union_obj.layout == .Extern and !sema.validateExternType(field_ty, .union_field)) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27782,7 +28231,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
} else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const fields_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const fields_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, fields_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27874,7 +28323,6 @@ fn generateUnionTagTypeNumbered(
.tag_ty = int_ty,
.fields = .{},
.values = .{},
- .node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
@@ -27932,7 +28380,6 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, may
enum_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
@@ -28243,7 +28690,7 @@ fn enumFieldSrcLoc(
=> tree.containerDeclArg(enum_node),
// Container was constructed with `@Type`.
- else => return LazySrcLoc.nodeOffset(node_offset),
+ else => return LazySrcLoc.nodeOffset(0),
};
var it_index: usize = 0;
for (container_decl.ast.members) |member_node| {
diff --git a/src/Zir.zig b/src/Zir.zig
index 6427420840..7a1db54ea2 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -490,14 +490,6 @@ pub const Inst = struct {
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
- /// Given a reference to a function and a parameter index, returns the
- /// type of the parameter. The only usage of this instruction is for the
- /// result location of parameters of function calls. In the case of a function's
- /// parameter type being `anytype`, it is the type coercion's job to detect this
- /// scenario and skip the coercion, so that semantic analysis of this instruction
- /// is not in a position where it must create an invalid type.
- /// Uses the `param_type` union field.
- param_type,
/// Turns an R-Value into a const L-Value. In other words, it takes a value,
/// stores it in a memory location, and returns a const pointer to it. If the value
/// is `comptime`, the memory location is global static constant data. Otherwise,
@@ -839,8 +831,6 @@ pub const Inst = struct {
round,
/// Implement builtin `@tagName`. Uses `un_node`.
tag_name,
- /// Implement builtin `@Type`. Uses `un_node`.
- reify,
/// Implement builtin `@typeName`. Uses `un_node`.
type_name,
/// Implement builtin `@Frame`. Uses `un_node`.
@@ -1097,7 +1087,6 @@ pub const Inst = struct {
.mul,
.mulwrap,
.mul_sat,
- .param_type,
.ref,
.shl,
.shl_sat,
@@ -1197,7 +1186,6 @@ pub const Inst = struct {
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -1400,7 +1388,6 @@ pub const Inst = struct {
.mul,
.mulwrap,
.mul_sat,
- .param_type,
.ref,
.shl,
.shl_sat,
@@ -1484,7 +1471,6 @@ pub const Inst = struct {
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -1573,7 +1559,6 @@ pub const Inst = struct {
.mulwrap = .pl_node,
.mul_sat = .pl_node,
- .param_type = .param_type,
.param = .pl_tok,
.param_comptime = .pl_tok,
.param_anytype = .str_tok,
@@ -1759,7 +1744,6 @@ pub const Inst = struct {
.trunc = .un_node,
.round = .un_node,
.tag_name = .un_node,
- .reify = .un_node,
.type_name = .un_node,
.frame_type = .un_node,
.frame_size = .un_node,
@@ -1980,6 +1964,10 @@ pub const Inst = struct {
/// Implement builtin `@intToError`.
/// `operand` is payload index to `UnNode`.
int_to_error,
+ /// Implement builtin `@Type`.
+ /// `operand` is payload index to `UnNode`.
+ /// `small` contains `NameStrategy`.
+ reify,
pub const InstData = struct {
opcode: Extended,
@@ -2541,10 +2529,6 @@ pub const Inst = struct {
/// Points to a `Block`.
payload_index: u32,
},
- param_type: struct {
- callee: Ref,
- param_index: u32,
- },
@"unreachable": struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
@@ -2615,7 +2599,6 @@ pub const Inst = struct {
ptr_type,
int_type,
bool_br,
- param_type,
@"unreachable",
@"break",
switch_capture,
@@ -2795,7 +2778,9 @@ pub const Inst = struct {
};
/// Stored inside extra, with trailing arguments according to `args_len`.
- /// Each argument is a `Ref`.
+ /// Implicit 0. arg_0_start: u32, // always same as `args_len`
+ /// 1. arg_end: u32, // for each `args_len`
+ /// arg_N_start is the same as arg_N-1_end
pub const Call = struct {
// Note: Flags *must* come first so that unusedResultExpr
// can find it when it goes to modify them.
@@ -3100,13 +3085,16 @@ pub const Inst = struct {
/// 0. src_node: i32, // if has_src_node
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
- /// 3. decl_bits: u32 // for every 8 decls
+ /// 3. backing_int_body_len: u32, // if has_backing_int
+ /// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
+ /// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
+ /// 6. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
- /// 4. decl: { // for every decls_len
+ /// 7. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
@@ -3124,13 +3112,13 @@ pub const Inst = struct {
/// address_space: Ref,
/// }
/// }
- /// 5. flags: u32 // for every 8 fields
+ /// 8. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
- /// 6. fields: { // for every fields_len
+ /// 9. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // 0 if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
@@ -3138,7 +3126,7 @@ pub const Inst = struct {
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
- /// 7. bodies: { // for every fields_len
+ /// 10. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len
@@ -3148,11 +3136,12 @@ pub const Inst = struct {
has_src_node: bool,
has_fields_len: bool,
has_decls_len: bool,
+ has_backing_int: bool,
known_non_opv: bool,
known_comptime_only: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
- _: u7 = undefined,
+ _: u6 = undefined,
};
};
@@ -3619,6 +3608,16 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
break :decls_len decls_len;
} else 0;
+ if (small.has_backing_int) {
+ const backing_int_body_len = zir.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
return declIteratorInner(zir, extra_index, decls_len);
},
.enum_decl => {
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 988317284b..d256f9a558 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -778,6 +778,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
=> return self.fail("TODO implement optimized float mode", .{}),
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index be2891ef6f..e8f0507614 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -247,6 +247,31 @@ const BigTomb = struct {
log.debug("%{d} => {}", .{ bt.inst, result });
const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
+
+ switch (result) {
+ .register => |reg| {
+ // In some cases (such as bitcast), an operand
+ // may be the same MCValue as the result. If
+ // that operand died and was a register, it
+ // was freed by processDeath. We have to
+ // "re-allocate" the register.
+ if (bt.function.register_manager.isRegFree(reg)) {
+ bt.function.register_manager.getRegAssumeFree(reg, bt.inst);
+ }
+ },
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| {
+ if (bt.function.register_manager.isRegFree(reg)) {
+ bt.function.register_manager.getRegAssumeFree(reg, bt.inst);
+ }
+ bt.function.cpsr_flags_inst = bt.inst;
+ },
+ .cpsr_flags => {
+ bt.function.cpsr_flags_inst = bt.inst;
+ },
+ else => {},
+ }
}
bt.function.finishAirBookkeeping();
}
@@ -332,7 +357,7 @@ pub fn generate(
};
for (function.dbg_arg_relocs.items) |reloc| {
- try function.genArgDbgInfo(reloc.inst, reloc.index, call_info.stack_byte_count);
+ try function.genArgDbgInfo(reloc.inst, reloc.index);
}
var mir = Mir{
@@ -351,7 +376,8 @@ pub fn generate(
.prev_di_pc = 0,
.prev_di_line = module_fn.lbrace_line,
.prev_di_column = module_fn.lbrace_column,
- .prologue_stack_space = call_info.stack_byte_count + function.saved_regs_stack_space,
+ .stack_size = function.max_end_stack,
+ .saved_regs_stack_space = function.saved_regs_stack_space,
};
defer emit.deinit();
@@ -464,6 +490,7 @@ fn gen(self: *Self) !void {
const total_stack_size = self.max_end_stack + self.saved_regs_stack_space;
const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align);
const stack_size = aligned_total_stack_end - self.saved_regs_stack_space;
+ self.max_end_stack = stack_size;
if (Instruction.Operand.fromU32(stack_size)) |op| {
self.mir_instructions.set(sub_reloc, .{
.tag = .sub,
@@ -769,6 +796,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
=> return self.fail("TODO implement optimized float mode", .{}),
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -1812,7 +1840,7 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionErr for registers", .{}),
.stack_argument_offset => |off| {
- return MCValue{ .stack_argument_offset = off - err_offset };
+ return MCValue{ .stack_argument_offset = off + err_offset };
},
.stack_offset => |off| {
return MCValue{ .stack_offset = off - err_offset };
@@ -1849,7 +1877,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_argument_offset => |off| {
- return MCValue{ .stack_argument_offset = off - payload_offset };
+ return MCValue{ .stack_argument_offset = off + payload_offset };
},
.stack_offset => |off| {
return MCValue{ .stack_offset = off - payload_offset };
@@ -1983,7 +2011,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
.dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off - 4 };
+ break :result MCValue{ .stack_argument_offset = off + 4 };
},
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - 4 };
@@ -2259,16 +2287,17 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.register_c_flag,
.register_v_flag,
=> unreachable, // cannot hold an address
- .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
- .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
+ .immediate => |imm| {
+ try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm });
+ },
+ .ptr_stack_offset => |off| {
+ try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off });
+ },
.register => |reg| {
const reg_lock = self.register_manager.lockReg(reg);
defer if (reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
switch (dst_mcv) {
- .dead => unreachable,
- .undef => unreachable,
- .cpsr_flags => unreachable,
.register => |dst_reg| {
try self.genLdrRegister(dst_reg, reg, elem_ty);
},
@@ -2304,7 +2333,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
- else => return self.fail("TODO load from register into {}", .{dst_mcv}),
+ else => unreachable, // attempting to load into non-register or non-stack MCValue
}
},
.memory,
@@ -2401,7 +2430,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
// sub src_reg, fp, #off
try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
- .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
.stack_argument_offset => |off| {
_ = try self.addInst(.{
.tag = .ldr_ptr_stack_argument,
@@ -2507,7 +2536,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
switch (mcv) {
.dead, .unreach => unreachable,
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off - struct_field_offset };
+ break :result MCValue{ .stack_argument_offset = off + struct_field_offset };
},
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - struct_field_offset };
@@ -3347,6 +3376,102 @@ fn genInlineMemcpy(
// end:
}
+fn genInlineMemset(
+ self: *Self,
+ dst: MCValue,
+ val: MCValue,
+ len: MCValue,
+) !void {
+ const dst_reg = switch (dst) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+ };
+ const dst_reg_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const val_reg = switch (val) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.initTag(.u8), val),
+ };
+ const val_reg_lock = self.register_manager.lockReg(val_reg);
+ defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const len_reg = switch (len) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.usize, len),
+ };
+ const len_reg_lock = self.register_manager.lockReg(len_reg);
+ defer if (len_reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const count_reg = try self.register_manager.allocReg(null, gp);
+
+ try self.genInlineMemsetCode(dst_reg, val_reg, len_reg, count_reg);
+}
+
+fn genInlineMemsetCode(
+ self: *Self,
+ dst: Register,
+ val: Register,
+ len: Register,
+ count: Register,
+) !void {
+ // mov count, #0
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .data = .{ .rr_op = .{
+ .rd = count,
+ .rn = .r0,
+ .op = Instruction.Operand.imm(0, 0),
+ } },
+ });
+
+ // loop:
+ // cmp count, len
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .rr_op = .{
+ .rd = .r0,
+ .rn = count,
+ .op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none),
+ } },
+ });
+
+ // bge end
+ _ = try self.addInst(.{
+ .tag = .b,
+ .cond = .ge,
+ .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 4) },
+ });
+
+ // strb val, [src, count]
+ _ = try self.addInst(.{
+ .tag = .strb,
+ .data = .{ .rr_offset = .{
+ .rt = val,
+ .rn = dst,
+ .offset = .{ .offset = Instruction.Offset.reg(count, .none) },
+ } },
+ });
+
+ // add count, count, #1
+ _ = try self.addInst(.{
+ .tag = .add,
+ .data = .{ .rr_op = .{
+ .rd = count,
+ .rn = count,
+ .op = Instruction.Operand.imm(1, 0),
+ } },
+ });
+
+ // b loop
+ _ = try self.addInst(.{
+ .tag = .b,
+ .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) },
+ });
+
+ // end:
+}
+
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void {
@@ -3369,9 +3494,7 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void {
}
}
-fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32, stack_byte_count: u32) error{OutOfMemory}!void {
- const prologue_stack_space = stack_byte_count + self.saved_regs_stack_space;
-
+fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32) error{OutOfMemory}!void {
const mcv = self.args[arg_index];
const ty = self.air.instructions.items(.data)[inst].ty;
const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
@@ -3404,7 +3527,7 @@ fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32, stack_byte_c
// const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const adjusted_stack_offset = switch (mcv) {
.stack_offset => |offset| -@intCast(i32, offset),
- .stack_argument_offset => |offset| @intCast(i32, prologue_stack_space - offset),
+ .stack_argument_offset => |offset| @intCast(i32, self.saved_regs_stack_space + offset),
else => unreachable,
};
@@ -3524,7 +3647,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.register_manager.getReg(reg, null);
}
- if (info.return_value == .stack_offset) {
+ // If returning by reference, r0 will contain the address where the
+ // result should be written. In that case, make sure that r0 remains
+ // untouched by the parameter passing code.
+ const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType();
const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
@@ -3540,7 +3666,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
info.return_value = .{ .stack_offset = stack_offset };
- }
+
+ break :blk self.register_manager.lockRegAssumeUnused(.r0);
+ } else null;
+ defer if (r0_lock) |reg| self.register_manager.unlockReg(reg);
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
@@ -3559,7 +3688,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.stack_offset => unreachable,
.stack_argument_offset => |offset| try self.genSetStackArgument(
arg_ty,
- info.stack_byte_count - offset,
+ offset,
arg_mcv,
),
else => unreachable,
@@ -4621,11 +4750,15 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
- 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
- 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
- 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
- else => return self.fail("TODO implement memset", .{}),
+ switch (abi_size) {
+ 1 => try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
+ 2 => try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
+ 4 => try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
+ else => try self.genInlineMemset(
+ .{ .ptr_stack_offset = stack_offset },
+ .{ .immediate = 0xaa },
+ .{ .immediate = abi_size },
+ ),
}
},
.cpsr_flags,
@@ -5037,9 +5170,9 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
switch (abi_size) {
- 1 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaa }),
- 2 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaa }),
- 4 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
+ 1 => try self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaa }),
+ 2 => try self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaa }),
+ 4 => try self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
else => return self.fail("TODO implement memset", .{}),
}
},
@@ -5653,8 +5786,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ty.abiAlignment(self.target.*) == 8)
nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);
- nsaa += param_size;
result.args[i] = .{ .stack_argument_offset = nsaa };
+ nsaa += param_size;
}
}
@@ -5687,9 +5820,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (param_types) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_alignment = ty.abiAlignment(self.target.*);
- stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, ty.abiAlignment(self.target.*)) + param_size;
+ stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset };
+ stack_offset += param_size;
} else {
result.args[i] = .{ .none = {} };
}
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 47d508b34a..cf749792f0 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -33,9 +33,13 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
-/// The amount of stack space consumed by all stack arguments as well
-/// as the saved callee-saved registers
-prologue_stack_space: u32,
+/// The amount of stack space consumed by the saved callee-saved
+/// registers in bytes
+saved_regs_stack_space: u32,
+
+/// The final stack frame size of the function (already aligned to the
+/// respective stack alignment). Does not include prologue stack space.
+stack_size: u32,
/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
@@ -500,14 +504,15 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const r_stack_offset = emit.mir.instructions.items(.data)[inst].r_stack_offset;
+ const rt = r_stack_offset.rt;
- const raw_offset = emit.prologue_stack_space - r_stack_offset.stack_offset;
+ const raw_offset = emit.stack_size + emit.saved_regs_stack_space + r_stack_offset.stack_offset;
switch (tag) {
.ldr_ptr_stack_argument => {
const operand = Instruction.Operand.fromU32(raw_offset) orelse
return emit.fail("TODO mirLoadStack larger offsets", .{});
- try emit.writeInstruction(Instruction.add(cond, r_stack_offset.rt, .fp, operand));
+ try emit.writeInstruction(Instruction.add(cond, rt, .sp, operand));
},
.ldr_stack_argument,
.ldrb_stack_argument,
@@ -516,23 +521,11 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk Instruction.Offset.imm(@intCast(u12, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
- const ldr = switch (tag) {
- .ldr_stack_argument => &Instruction.ldr,
- .ldrb_stack_argument => &Instruction.ldrb,
+ switch (tag) {
+ .ldr_stack_argument => try emit.writeInstruction(Instruction.ldr(cond, rt, .sp, .{ .offset = offset })),
+ .ldrb_stack_argument => try emit.writeInstruction(Instruction.ldrb(cond, rt, .sp, .{ .offset = offset })),
else => unreachable,
- };
-
- const ldr_workaround = switch (builtin.zig_backend) {
- .stage1 => ldr.*,
- else => ldr,
- };
-
- try emit.writeInstruction(ldr_workaround(
- cond,
- r_stack_offset.rt,
- .fp,
- .{ .offset = offset },
- ));
+ }
},
.ldrh_stack_argument,
.ldrsb_stack_argument,
@@ -542,24 +535,12 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
- const ldr = switch (tag) {
- .ldrh_stack_argument => &Instruction.ldrh,
- .ldrsb_stack_argument => &Instruction.ldrsb,
- .ldrsh_stack_argument => &Instruction.ldrsh,
+ switch (tag) {
+ .ldrh_stack_argument => try emit.writeInstruction(Instruction.ldrh(cond, rt, .sp, .{ .offset = offset })),
+ .ldrsb_stack_argument => try emit.writeInstruction(Instruction.ldrsb(cond, rt, .sp, .{ .offset = offset })),
+ .ldrsh_stack_argument => try emit.writeInstruction(Instruction.ldrsh(cond, rt, .sp, .{ .offset = offset })),
else => unreachable,
- };
-
- const ldr_workaround = switch (builtin.zig_backend) {
- .stage1 => ldr.*,
- else => ldr,
- };
-
- try emit.writeInstruction(ldr_workaround(
- cond,
- r_stack_offset.rt,
- .fp,
- .{ .offset = offset },
- ));
+ }
},
else => unreachable,
}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a5007928b6..06adcff6d4 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -694,6 +694,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
=> return self.fail("TODO implement optimized float mode", .{}),
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index bf834a36d9..cd891f0fa3 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -706,6 +706,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
=> @panic("TODO implement optimized float mode"),
.is_named_enum_value => @panic("TODO implement is_named_enum_value"),
+ .error_set_has_value => @panic("TODO implement error_set_has_value"),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index ef92361de2..8eb3e2175b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -29,6 +29,8 @@ const errUnionErrorOffset = codegen.errUnionErrorOffset;
const WValue = union(enum) {
/// May be referenced but is unused
none: void,
+ /// The value lives on top of the stack
+ stack: void,
/// Index of the local variable
local: u32,
/// An immediate 32bit value
@@ -55,7 +57,7 @@ const WValue = union(enum) {
/// In wasm function pointers are indexes into a function table,
/// rather than an address in the data section.
function_index: u32,
- /// Offset from the bottom of the stack, with the offset
+ /// Offset from the bottom of the virtual stack, with the offset
/// pointing to where the value lives.
stack_offset: u32,
@@ -71,6 +73,38 @@ const WValue = union(enum) {
else => return 0,
}
}
+
+ /// Promotes a `WValue` to a local when the given value is on top of the stack.
+ /// When encountering a `local` or `stack_offset` this is essentially a no-op.
+ /// All other tags are illegal.
+ fn toLocal(value: WValue, gen: *Self, ty: Type) InnerError!WValue {
+ switch (value) {
+ .stack => {
+ const local = try gen.allocLocal(ty);
+ try gen.addLabel(.local_set, local.local);
+ return local;
+ },
+ .local, .stack_offset => return value,
+ else => unreachable,
+ }
+ }
+
+ /// Marks a local as no longer being referenced and essentially allows
+ /// us to re-use it somewhere else within the function.
+ /// The valtype of the local is deduced from the index of the given value.
+ fn free(value: *WValue, gen: *Self) void {
+ if (value.* != .local) return;
+ const local_value = value.local;
+ const index = local_value - gen.args.len - @boolToInt(gen.return_value != .none);
+ const valtype = @intToEnum(wasm.Valtype, gen.locals.items[index]);
+ switch (valtype) {
+ .i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead
+ .i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
+ .f32 => gen.free_locals_f32.append(gen.gpa, local_value) catch return,
+ .f64 => gen.free_locals_f64.append(gen.gpa, local_value) catch return,
+ }
+ value.* = WValue{ .none = {} };
+ }
};
/// Wasm ops, but without input/output/signedness information
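
The new `stack` tag and `toLocal` establish a convention for this backend: helpers may leave their result on the wasm operand stack, and the caller promotes it to a local only when the value must outlive the current expression. A minimal stand-alone sketch of that promotion rule (the `Value`/`toLocal` names here are illustrative, not the backend's types):

const std = @import("std");

const Value = union(enum) {
    none,
    stack,
    local: u32,
    stack_offset: u32,
};

// A `.stack` result is captured into a fresh local (the backend emits a
// `local.set` at this point); values that already live in a local or at a
// stack offset pass through untouched.
fn toLocal(val: Value, next_local: *u32) Value {
    switch (val) {
        .stack => {
            defer next_local.* += 1;
            return Value{ .local = next_local.* };
        },
        .local, .stack_offset => return val,
        else => unreachable,
    }
}

test "only stack results are materialized" {
    var next: u32 = 0;
    const promoted = toLocal(.stack, &next);
    try std.testing.expect(promoted == .local and promoted.local == 0);
    const unchanged = toLocal(Value{ .local = 7 }, &next);
    try std.testing.expect(unchanged.local == 7 and next == 1);
}

The `genBody` change further down relies on the same rule: only promoted results, never raw `.stack` values, may be stored in the instruction value map.
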
@@ -601,6 +635,21 @@ stack_size: u32 = 0,
/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
stack_alignment: u32 = 16,
+// For each individual Wasm valtype we store a separate free list which
+// allows us to re-use locals that are no longer used, e.g. a temporary local.
+/// A list of indexes which represents a local of valtype `i32`.
+/// It is illegal to store a non-i32 valtype in this list.
+free_locals_i32: std.ArrayListUnmanaged(u32) = .{},
+/// A list of indexes which represents a local of valtype `i64`.
+/// It is illegal to store a non-i64 valtype in this list.
+free_locals_i64: std.ArrayListUnmanaged(u32) = .{},
+/// A list of indexes which represents a local of valtype `f32`.
+/// It is illegal to store a non-f32 valtype in this list.
+free_locals_f32: std.ArrayListUnmanaged(u32) = .{},
+/// A list of indexes which represents a local of valtype `f64`.
+/// It is illegal to store a non-f64 valtype in this list.
+free_locals_f64: std.ArrayListUnmanaged(u32) = .{},
+
const InnerError = error{
OutOfMemory,
/// An error occurred when trying to lower AIR to MIR.
@@ -759,7 +808,7 @@ fn genBlockType(ty: Type, target: std.Target) u8 {
/// Writes the bytecode depending on the given `WValue` in `val`
fn emitWValue(self: *Self, value: WValue) InnerError!void {
switch (value) {
- .none => {}, // no-op
+ .none, .stack => {}, // no-op
.local => |idx| try self.addLabel(.local_get, idx),
.imm32 => |val| try self.addImm32(@bitCast(i32, val)),
.imm64 => |val| try self.addImm64(val),
@@ -781,9 +830,30 @@ fn emitWValue(self: *Self, value: WValue) InnerError!void {
/// Creates a local for a given `Type`.
/// Returns a corresponding `WValue` with `local` as the active tag
fn allocLocal(self: *Self, ty: Type) InnerError!WValue {
+ const valtype = typeToValtype(ty, self.target);
+ switch (valtype) {
+ .i32 => if (self.free_locals_i32.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ .i64 => if (self.free_locals_i64.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ .f32 => if (self.free_locals_f32.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ .f64 => if (self.free_locals_f64.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ }
+ // no local was free to be re-used, so allocate a new local instead
+ return self.ensureAllocLocal(ty);
+}
+
+/// Ensures a new local will be created. This is useful when a
+/// zero-initialized local is required.
+fn ensureAllocLocal(self: *Self, ty: Type) InnerError!WValue {
+ try self.locals.append(self.gpa, genValtype(ty, self.target));
const initial_index = self.local_index;
- const valtype = genValtype(ty, self.target);
- try self.locals.append(self.gpa, valtype);
self.local_index += 1;
return WValue{ .local = initial_index };
}
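
Taken together, the free lists and the `allocLocal`/`ensureAllocLocal` split mean a local index is recycled whenever possible, while a brand-new (and therefore still zero-valued) local is declared only when required, e.g. for the loop counters in `memcpy`/`memset`. A stand-alone model of the two paths (the `Pool` type and its method names are hypothetical, not the backend's):

const std = @import("std");

const Valtype = enum { i32, i64, f32, f64 };

const Pool = struct {
    free_lists: [4]std.ArrayListUnmanaged(u32) = .{ .{}, .{}, .{}, .{} },
    local_index: u32 = 0,

    // Re-use a freed index of the matching valtype when one exists.
    fn alloc(pool: *Pool, vt: Valtype) u32 {
        if (pool.free_lists[@enumToInt(vt)].popOrNull()) |index| return index;
        return pool.ensureAlloc();
    }

    // Always declare a new local; only such a local is still zero when first read.
    fn ensureAlloc(pool: *Pool) u32 {
        defer pool.local_index += 1;
        return pool.local_index;
    }

    // Failing to record a freed index is harmless; a new local gets declared instead.
    fn release(pool: *Pool, gpa: std.mem.Allocator, vt: Valtype, index: u32) void {
        pool.free_lists[@enumToInt(vt)].append(gpa, index) catch {};
    }
};

test "freed locals are handed out again, per valtype" {
    const gpa = std.testing.allocator;
    var pool = Pool{};
    defer {
        var i: usize = 0;
        while (i < pool.free_lists.len) : (i += 1) pool.free_lists[i].deinit(gpa);
    }

    const a = pool.alloc(.i32);
    pool.release(gpa, .i32, a);
    try std.testing.expectEqual(a, pool.alloc(.i32)); // re-used index
    try std.testing.expect(pool.alloc(.i64) != a); // other valtype gets a fresh index
}

Dropping a freed index on an allocation failure is deliberately tolerated, as the diff's own comment notes: the worst case is simply declaring one more local.
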
@@ -1135,9 +1205,9 @@ fn initializeStack(self: *Self) !void {
// Reserve a local to store the current stack pointer
// We can later use this local to set the stack pointer back to the value
// we have stored here.
- self.initial_stack_value = try self.allocLocal(Type.usize);
+ self.initial_stack_value = try self.ensureAllocLocal(Type.usize);
// Also reserve a local to store the bottom stack value
- self.bottom_stack_value = try self.allocLocal(Type.usize);
+ self.bottom_stack_value = try self.ensureAllocLocal(Type.usize);
}
/// Reads the stack pointer from `Context.initial_stack_value` and writes it
@@ -1268,7 +1338,9 @@ fn memcpy(self: *Self, dst: WValue, src: WValue, len: WValue) !void {
else => {
// TODO: We should probably lower this to a call to compiler_rt
// But for now, we implement it manually
- const offset = try self.allocLocal(Type.usize); // local for counter
+ var offset = try self.ensureAllocLocal(Type.usize); // local for counter
+ defer offset.free(self);
+
// outer block to jump to when loop is done
try self.startBlock(.block, wasm.block_empty);
try self.startBlock(.loop, wasm.block_empty);
@@ -1405,7 +1477,7 @@ fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64, action: enum
// do not perform arithmetic when offset is 0.
if (offset == 0 and ptr_value.offset() == 0 and action == .modify) return ptr_value;
const result_ptr: WValue = switch (action) {
- .new => try self.allocLocal(Type.usize),
+ .new => try self.ensureAllocLocal(Type.usize),
.modify => ptr_value,
};
try self.emitWValue(ptr_value);
@@ -1622,6 +1694,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.err_return_trace,
.set_err_return_trace,
.is_named_enum_value,
+ .error_set_has_value,
=> |tag| return self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
.add_optimized,
@@ -1653,7 +1726,10 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
for (body) |inst| {
const result = try self.genInst(inst);
- try self.values.putNoClobber(self.gpa, Air.indexToRef(inst), result);
+ if (result != .none) {
+ assert(result != .stack); // not allowed to store stack values as we cannot keep track of where they are on the stack
+ try self.values.putNoClobber(self.gpa, Air.indexToRef(inst), result);
+ }
}
}
@@ -1727,8 +1803,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const fn_info = self.decl.ty.fnInfo();
if (!firstParamSRet(fn_info.cc, fn_info.return_type, self.target)) {
- const result = try self.load(operand, ret_ty, 0);
- try self.emitWValue(result);
+ // leave on the stack
+ _ = try self.load(operand, ret_ty, 0);
}
try self.restoreStackPointer();
@@ -1847,6 +1923,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
+ assert(!(lhs != .stack and rhs == .stack));
switch (ty.zigTypeTag()) {
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload();
@@ -1880,20 +1957,26 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
.Pointer => {
if (ty.isSlice()) {
// store pointer first
+ // lower it to the stack so we do not have to store rhs into a local first
+ try self.emitWValue(lhs);
const ptr_local = try self.load(rhs, Type.usize, 0);
- try self.store(lhs, ptr_local, Type.usize, 0);
+ try self.store(.{ .stack = {} }, ptr_local, Type.usize, 0 + lhs.offset());
// retrieve length from rhs, and store that alongside lhs as well
+ try self.emitWValue(lhs);
const len_local = try self.load(rhs, Type.usize, self.ptrSize());
- try self.store(lhs, len_local, Type.usize, self.ptrSize());
+ try self.store(.{ .stack = {} }, len_local, Type.usize, self.ptrSize() + lhs.offset());
return;
}
},
.Int => if (ty.intInfo(self.target).bits > 64) {
+ try self.emitWValue(lhs);
const lsb = try self.load(rhs, Type.u64, 0);
+ try self.store(.{ .stack = {} }, lsb, Type.u64, 0 + lhs.offset());
+
+ try self.emitWValue(lhs);
const msb = try self.load(rhs, Type.u64, 8);
- try self.store(lhs, lsb, Type.u64, 0);
- try self.store(lhs, msb, Type.u64, 8);
+ try self.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
},
else => {},
@@ -1932,9 +2015,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return new_local;
}
- return self.load(operand, ty, 0);
+ const stack_loaded = try self.load(operand, ty, 0);
+ return stack_loaded.toLocal(self, ty);
}
+/// Loads an operand from the linear memory section.
+/// NOTE: Leaves the value on the stack.
fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
// load local's value from memory by its stack position
try self.emitWValue(operand);
@@ -1952,10 +2038,7 @@ fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
.{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(self.target) },
);
- // store the result in a local
- const result = try self.allocLocal(ty);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2025,10 +2108,14 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const rhs = try self.resolveInst(bin_op.rhs);
const ty = self.air.typeOf(bin_op.lhs);
- return self.binOp(lhs, rhs, ty, op);
+ const stack_value = try self.binOp(lhs, rhs, ty, op);
+ return stack_value.toLocal(self, ty);
}
+/// Performs a binary operation on the given `WValue`s.
+/// NOTE: This leaves the value on top of the stack.
fn binOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ assert(!(lhs != .stack and rhs == .stack));
if (isByRef(ty, self.target)) {
if (ty.zigTypeTag() == .Int) {
return self.binOpBigInt(lhs, rhs, ty, op);
@@ -2054,24 +2141,18 @@ fn binOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WVa
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- // save the result in a temporary
- const bin_local = try self.allocLocal(ty);
- try self.addLabel(.local_set, bin_local.local);
- return bin_local;
+ return WValue{ .stack = {} };
}
+/// Performs a binary operation for 16-bit floats.
+/// NOTE: Leaves the result value on the stack
fn binOpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: Op) InnerError!WValue {
- const ext_lhs = try self.fpext(lhs, Type.f16, Type.f32);
- const ext_rhs = try self.fpext(rhs, Type.f16, Type.f32);
-
const opcode: wasm.Opcode = buildOpcode(.{ .op = op, .valtype1 = .f32, .signedness = .unsigned });
- try self.emitWValue(ext_lhs);
- try self.emitWValue(ext_rhs);
+ _ = try self.fpext(lhs, Type.f16, Type.f32);
+ _ = try self.fpext(rhs, Type.f16, Type.f32);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- // re-use temporary local
- try self.addLabel(.local_set, ext_lhs.local);
- return self.fptrunc(ext_lhs, Type.f32, Type.f16);
+ return self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
}
fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
@@ -2084,13 +2165,16 @@ fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerErr
}
const result = try self.allocStack(ty);
- const lhs_high_bit = try self.load(lhs, Type.u64, 0);
+ var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer lhs_high_bit.free(self);
+ var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer rhs_high_bit.free(self);
+ var high_op_res = try (try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(self, Type.u64);
+ defer high_op_res.free(self);
+
const lhs_low_bit = try self.load(lhs, Type.u64, 8);
- const rhs_high_bit = try self.load(rhs, Type.u64, 0);
const rhs_low_bit = try self.load(rhs, Type.u64, 8);
-
const low_op_res = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
- const high_op_res = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op);
const lt = if (op == .add) blk: {
break :blk try self.cmp(high_op_res, rhs_high_bit, Type.u64, .lt);
@@ -2098,7 +2182,8 @@ fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerErr
break :blk try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
} else unreachable;
const tmp = try self.intcast(lt, Type.u32, Type.u64);
- const tmp_op = try self.binOp(low_op_res, tmp, Type.u64, op);
+ var tmp_op = try (try self.binOp(low_op_res, tmp, Type.u64, op)).toLocal(self, Type.u64);
+ defer tmp_op.free(self);
try self.store(result, high_op_res, Type.u64, 0);
try self.store(result, tmp_op, Type.u64, 8);
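
`binOpBigInt` works on two 64-bit limbs; since wasm has no add-with-carry instruction, the carry is recovered by comparing a wrapped partial result against one of its operands (the `cmp` whose result is then `intcast` and combined with the other limb's operation). A stand-alone sketch of the addition case, in plain Zig rather than the lowering itself:

const std = @import("std");

// Carry recovery for a two-limb add: if the wrapped low-limb sum is smaller
// than one of its inputs, the addition carried into the high limb.
fn add128(lhs_lo: u64, lhs_hi: u64, rhs_lo: u64, rhs_hi: u64) [2]u64 {
    const lo = lhs_lo +% rhs_lo;
    const carry: u64 = @boolToInt(lo < lhs_lo); // the `cmp` + `intcast` pair in the diff
    const hi = lhs_hi +% rhs_hi +% carry;
    return .{ lo, hi };
}

test "carry propagates into the high limb" {
    const res = add128(std.math.maxInt(u64), 0, 1, 0);
    try std.testing.expectEqual(@as(u64, 0), res[0]);
    try std.testing.expectEqual(@as(u64, 1), res[1]);
}
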
@@ -2115,40 +2200,22 @@ fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
return self.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
- return self.wrapBinOp(lhs, rhs, ty, op);
+ return (try self.wrapBinOp(lhs, rhs, ty, op)).toLocal(self, ty);
}
+/// Performs a wrapping binary operation.
+/// Asserts rhs is not a stack value when lhs also isn't.
+/// NOTE: Leaves the result on the stack when its Type is <= 64 bits
fn wrapBinOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const bit_size = ty.intInfo(self.target).bits;
- var wasm_bits = toWasmBits(bit_size) orelse {
- return self.fail("TODO: Implement wrapping arithmetic for integers with bitsize: {d}\n", .{bit_size});
- };
-
- if (wasm_bits == 128) {
- const bin_op = try self.binOpBigInt(lhs, rhs, ty, op);
- return self.wrapOperand(bin_op, ty);
- }
-
- const opcode: wasm.Opcode = buildOpcode(.{
- .op = op,
- .valtype1 = typeToValtype(ty, self.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
- });
-
- try self.emitWValue(lhs);
- try self.emitWValue(rhs);
- try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const bin_local = try self.allocLocal(ty);
- try self.addLabel(.local_set, bin_local.local);
-
+ const bin_local = try self.binOp(lhs, rhs, ty, op);
return self.wrapOperand(bin_local, ty);
}
/// Wraps an operand based on a given type's bitsize.
/// Asserts `Type` is <= 128 bits.
+/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack.
fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
assert(ty.abiSize(self.target) <= 16);
- const result_local = try self.allocLocal(ty);
const bitsize = ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(bitsize) orelse {
return self.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
@@ -2157,14 +2224,15 @@ fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
if (wasm_bits == bitsize) return operand;
if (wasm_bits == 128) {
- const msb = try self.load(operand, Type.u64, 0);
+ assert(operand != .stack);
const lsb = try self.load(operand, Type.u64, 8);
const result_ptr = try self.allocStack(ty);
- try self.store(result_ptr, lsb, Type.u64, 8);
+ try self.emitWValue(result_ptr);
+ try self.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1;
try self.emitWValue(result_ptr);
- try self.emitWValue(msb);
+ _ = try self.load(operand, Type.u64, 0);
try self.addImm64(result);
try self.addTag(.i64_and);
try self.addMemArg(.i64_store, .{ .offset = result_ptr.offset(), .alignment = 8 });
@@ -2181,8 +2249,7 @@ fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
try self.addTag(.i64_and);
} else unreachable;
- try self.addLabel(.local_set, result_local.local);
- return result_local;
+ return WValue{ .stack = {} };
}
fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue {
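
For integers narrower than their 32- or 64-bit wasm container, `wrapOperand` masks the value with `bit_count` one-bits (`i32.and`/`i64.and` against `(1 << bits) - 1`). A small stand-alone check of that mask arithmetic (the helper name is illustrative; `Log2Int` keeps the shift amount below the container width, matching the narrower-than-container case this function handles):

const std = @import("std");

// The mask wrapOperand applies for sub-container integers: (1 << bits) - 1.
fn wrapMask(comptime T: type, bits: std.math.Log2Int(T)) T {
    const one: T = 1;
    return (one << bits) - 1;
}

test "wrapping a u5 that lives in a 32-bit local" {
    const mask = wrapMask(u32, 5);
    try std.testing.expectEqual(@as(u32, 0x1f), mask);
    try std.testing.expectEqual(@as(u32, 3), @as(u32, 35) & mask); // 35 wraps to 3 in 5 bits
}
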
@@ -2594,10 +2661,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const operand_ty = self.air.typeOf(bin_op.lhs);
- return self.cmp(lhs, rhs, operand_ty, op);
+ return (try self.cmp(lhs, rhs, operand_ty, op)).toLocal(self, Type.u32); // comparison result is always 32 bits
}
+/// Compares two operands.
+/// Asserts `rhs` is not a stack value when `lhs` is not a stack value either.
+/// NOTE: This leaves the result on top of the stack, rather than a new local.
fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+ assert(!(lhs != .stack and rhs == .stack));
if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
@@ -2639,15 +2710,12 @@ fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOper
});
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const cmp_tmp = try self.allocLocal(Type.initTag(.i32)); // bool is always i32
- try self.addLabel(.local_set, cmp_tmp.local);
- return cmp_tmp;
+ return WValue{ .stack = {} };
}
+/// Compares 16-bit floats
+/// NOTE: The result value remains on top of the stack.
fn cmpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: std.math.CompareOperator) InnerError!WValue {
- const ext_lhs = try self.fpext(lhs, Type.f16, Type.f32);
- const ext_rhs = try self.fpext(rhs, Type.f16, Type.f32);
-
const opcode: wasm.Opcode = buildOpcode(.{
.op = switch (op) {
.lt => .lt,
@@ -2660,13 +2728,11 @@ fn cmpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: std.math.CompareOperato
.valtype1 = .f32,
.signedness = .unsigned,
});
- try self.emitWValue(ext_lhs);
- try self.emitWValue(ext_rhs);
+ _ = try self.fpext(lhs, Type.f16, Type.f32);
+ _ = try self.fpext(rhs, Type.f16, Type.f32);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const result = try self.allocLocal(Type.initTag(.i32)); // bool is always i32
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2727,21 +2793,23 @@ fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
switch (wasm_bits) {
32 => {
const bin_op = try self.binOp(operand, .{ .imm32 = ~@as(u32, 0) }, operand_ty, .xor);
- return self.wrapOperand(bin_op, operand_ty);
+ return (try self.wrapOperand(bin_op, operand_ty)).toLocal(self, operand_ty);
},
64 => {
const bin_op = try self.binOp(operand, .{ .imm64 = ~@as(u64, 0) }, operand_ty, .xor);
- return self.wrapOperand(bin_op, operand_ty);
+ return (try self.wrapOperand(bin_op, operand_ty)).toLocal(self, operand_ty);
},
128 => {
const result_ptr = try self.allocStack(operand_ty);
+ try self.emitWValue(result_ptr);
const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
-
const msb_xor = try self.binOp(msb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+ try self.store(.{ .stack = {} }, msb_xor, Type.u64, 0 + result_ptr.offset());
+
+ try self.emitWValue(result_ptr);
+ const lsb = try self.load(operand, Type.u64, 8);
const lsb_xor = try self.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
- try self.store(result_ptr, msb_xor, Type.u64, 0);
- try self.store(result_ptr, lsb_xor, Type.u64, 8);
+ try self.store(result_ptr, lsb_xor, Type.u64, 8 + result_ptr.offset());
return result_ptr;
},
else => unreachable,
@@ -2829,7 +2897,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
}
- return self.load(operand, field_ty, offset);
+ const field = try self.load(operand, field_ty, offset);
+ return field.toLocal(self, field_ty);
}
fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3039,7 +3108,9 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool)
if (op_is_ptr or isByRef(payload_ty, self.target)) {
return self.buildPointerOffset(operand, pl_offset, .new);
}
- return self.load(operand, payload_ty, pl_offset);
+
+ const payload = try self.load(operand, payload_ty, pl_offset);
+ return payload.toLocal(self, payload_ty);
}
fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!WValue {
@@ -3059,7 +3130,8 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In
return operand;
}
- return self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target)));
+ const error_val = try self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target)));
+ return error_val.toLocal(self, Type.anyerror);
}
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3125,12 +3197,13 @@ fn airIntcast(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("todo Wasm intcast for bitsize > 128", .{});
}
- return self.intcast(operand, operand_ty, ty);
+ return (try self.intcast(operand, operand_ty, ty)).toLocal(self, ty);
}
/// Upcasts or downcasts an integer based on the given and wanted types,
/// and stores the result in a new operand.
/// Asserts type's bitsize <= 128
+/// NOTE: May leave the result on the top of the stack.
fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_info = given.intInfo(self.target);
const wanted_info = wanted.intInfo(self.target);
@@ -3153,25 +3226,22 @@ fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!W
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
const stack_ptr = try self.allocStack(wanted);
+ try self.emitWValue(stack_ptr);
// for 32 bit integers, we first coerce the value into a 64 bit integer before storing it
// meaning less store operations are required.
const lhs = if (op_bits == 32) blk: {
- const tmp = try self.intcast(
- operand,
- given,
- if (wanted.isSignedInt()) Type.i64 else Type.u64,
- );
- break :blk tmp;
+ break :blk try self.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
} else operand;
// store msb first
- try self.store(stack_ptr, lhs, Type.u64, 0);
+ try self.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
// For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value
if (wanted.isSignedInt()) {
+ try self.emitWValue(stack_ptr);
const shr = try self.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
- try self.store(stack_ptr, shr, Type.u64, 8);
+ try self.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
} else {
// Ensure memory of lsb is zero'd
try self.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
@@ -3179,9 +3249,7 @@ fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!W
return stack_ptr;
} else return self.load(operand, wanted, 0);
- const result = try self.allocLocal(wanted);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!WValue {
@@ -3190,9 +3258,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: en
const op_ty = self.air.typeOf(un_op);
const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
- return self.isNull(operand, optional_ty, opcode);
+ const is_null = try self.isNull(operand, optional_ty, opcode);
+ return is_null.toLocal(self, optional_ty);
}
+/// For a given type and operand, checks if it's considered `null`.
+/// NOTE: Leaves the result on the stack
fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
try self.emitWValue(operand);
if (!optional_ty.optionalReprIsPayload()) {
@@ -3209,9 +3280,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode)
try self.addImm32(0);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const is_null_tmp = try self.allocLocal(Type.initTag(.i32));
- try self.addLabel(.local_set, is_null_tmp.local);
- return is_null_tmp;
+ return WValue{ .stack = {} };
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3229,7 +3298,8 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.buildPointerOffset(operand, offset, .new);
}
- return self.load(operand, payload_ty, @intCast(u32, offset));
+ const payload = try self.load(operand, payload_ty, @intCast(u32, offset));
+ return payload.toLocal(self, payload_ty);
}
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3332,7 +3402,8 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return self.load(operand, Type.usize, self.ptrSize());
+ const len = try self.load(operand, Type.usize, self.ptrSize());
+ return len.toLocal(self, Type.usize);
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3346,8 +3417,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const elem_size = elem_ty.abiSize(self.target);
// load pointer onto stack
- const slice_ptr = try self.load(slice, Type.usize, 0);
- try self.addLabel(.local_get, slice_ptr.local);
+ _ = try self.load(slice, Type.usize, 0);
// calculate index into slice
try self.emitWValue(index);
@@ -3361,7 +3431,9 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
- return self.load(result, elem_ty, 0);
+
+ const elem_val = try self.load(result, elem_ty, 0);
+ return elem_val.toLocal(self, elem_ty);
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3374,8 +3446,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const slice_ptr = try self.load(slice, Type.usize, 0);
- try self.addLabel(.local_get, slice_ptr.local);
+ _ = try self.load(slice, Type.usize, 0);
// calculate index into slice
try self.emitWValue(index);
@@ -3383,7 +3454,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(Type.initTag(.i32));
+ const result = try self.allocLocal(Type.i32);
try self.addLabel(.local_set, result.local);
return result;
}
@@ -3392,7 +3463,8 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return self.load(operand, Type.usize, 0);
+ const ptr = try self.load(operand, Type.usize, 0);
+ return ptr.toLocal(self, Type.usize);
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3407,13 +3479,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{int_info.bits});
}
- const result = try self.intcast(operand, op_ty, wanted_ty);
+ var result = try self.intcast(operand, op_ty, wanted_ty);
const wanted_bits = wanted_ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
- return self.wrapOperand(result, wanted_ty);
+ result = try self.wrapOperand(result, wanted_ty);
}
- return result;
+ return result.toLocal(self, wanted_ty);
}
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3466,8 +3538,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(ptr, Type.usize, 0);
- try self.addLabel(.local_get, ptr_local.local);
+ _ = try self.load(ptr, Type.usize, 0);
} else {
try self.lowerToStack(ptr);
}
@@ -3478,12 +3549,15 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(elem_ty);
+ var result = try self.allocLocal(elem_ty);
try self.addLabel(.local_set, result.local);
if (isByRef(elem_ty, self.target)) {
return result;
}
- return self.load(result, elem_ty, 0);
+ defer result.free(self); // only free if it's not returned like above
+
+ const elem_val = try self.load(result, elem_ty, 0);
+ return elem_val.toLocal(self, elem_ty);
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3499,8 +3573,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(ptr, Type.usize, 0);
- try self.addLabel(.local_get, ptr_local.local);
+ _ = try self.load(ptr, Type.usize, 0);
} else {
try self.lowerToStack(ptr);
}
@@ -3511,7 +3584,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(Type.initTag(.i32));
+ const result = try self.allocLocal(Type.i32);
try self.addLabel(.local_set, result.local);
return result;
}
@@ -3599,7 +3672,7 @@ fn memset(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
else => {
// TODO: We should probably lower this to a call to compiler_rt
// But for now, we implement it manually
- const offset = try self.allocLocal(Type.usize); // local for counter
+ const offset = try self.ensureAllocLocal(Type.usize); // local for counter
// outer block to jump to when loop is done
try self.startBlock(.block, wasm.block_empty);
try self.startBlock(.loop, wasm.block_empty);
@@ -3656,13 +3729,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(Type.usize);
+ var result = try self.allocLocal(Type.usize);
try self.addLabel(.local_set, result.local);
if (isByRef(elem_ty, self.target)) {
return result;
}
- return self.load(result, elem_ty, 0);
+ defer result.free(self); // only free if no longer needed and not returned like above
+
+ const elem_val = try self.load(result, elem_ty, 0);
+ return elem_val.toLocal(self, elem_ty);
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3685,11 +3761,8 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
.signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
});
try self.addTag(Mir.Inst.Tag.fromOpcode(op));
-
- const result = try self.allocLocal(dest_ty);
- try self.addLabel(.local_set, result.local);
-
- return self.wrapOperand(result, dest_ty);
+ const wrapped = try self.wrapOperand(.{ .stack = {} }, dest_ty);
+ return wrapped.toLocal(self, dest_ty);
}
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3887,24 +3960,19 @@ fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
const payload_ty = operand_ty.optionalChild(&buf);
const offset = @intCast(u32, operand_ty.abiSize(self.target) - payload_ty.abiSize(self.target));
- const lhs_is_null = try self.isNull(lhs, operand_ty, .i32_eq);
- const rhs_is_null = try self.isNull(rhs, operand_ty, .i32_eq);
-
// We store the final result in here that will be validated
// if the optional is truly equal.
- const result = try self.allocLocal(Type.initTag(.i32));
+ var result = try self.ensureAllocLocal(Type.initTag(.i32));
+ defer result.free(self);
try self.startBlock(.block, wasm.block_empty);
- try self.emitWValue(lhs_is_null);
- try self.emitWValue(rhs_is_null);
+ _ = try self.isNull(lhs, operand_ty, .i32_eq);
+ _ = try self.isNull(rhs, operand_ty, .i32_eq);
try self.addTag(.i32_ne); // inverse so we can exit early
try self.addLabel(.br_if, 0);
- const lhs_pl = try self.load(lhs, payload_ty, offset);
- const rhs_pl = try self.load(rhs, payload_ty, offset);
-
- try self.emitWValue(lhs_pl);
- try self.emitWValue(rhs_pl);
+ _ = try self.load(lhs, payload_ty, offset);
+ _ = try self.load(rhs, payload_ty, offset);
const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, self.target) });
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try self.addLabel(.br_if, 0);
@@ -3916,26 +3984,29 @@ fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
try self.emitWValue(result);
try self.addImm32(0);
try self.addTag(if (op == .eq) .i32_ne else .i32_eq);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
/// Compares big integers by checking both its high bits and low bits.
+/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(operand_ty.abiSize(self.target) >= 16);
+ assert(!(lhs != .stack and rhs == .stack));
if (operand_ty.intInfo(self.target).bits > 128) {
return self.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(self.target).bits});
}
- const lhs_high_bit = try self.load(lhs, Type.u64, 0);
- const lhs_low_bit = try self.load(lhs, Type.u64, 8);
- const rhs_high_bit = try self.load(rhs, Type.u64, 0);
- const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer lhs_high_bit.free(self);
+ var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer rhs_high_bit.free(self);
switch (op) {
.eq, .neq => {
const xor_high = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, .xor);
+ const lhs_low_bit = try self.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try self.load(rhs, Type.u64, 8);
const xor_low = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
const or_result = try self.binOp(xor_high, xor_low, Type.u64, .@"or");
@@ -3947,20 +4018,17 @@ fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.ma
},
else => {
const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
- const high_bit_eql = try self.cmp(lhs_high_bit, rhs_high_bit, ty, .eq);
- const high_bit_cmp = try self.cmp(lhs_high_bit, rhs_high_bit, ty, op);
- const low_bit_cmp = try self.cmp(lhs_low_bit, rhs_low_bit, ty, op);
-
- try self.emitWValue(low_bit_cmp);
- try self.emitWValue(high_bit_cmp);
- try self.emitWValue(high_bit_eql);
+ // leave those values on top of the stack for '.select'
+ const lhs_low_bit = try self.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ _ = try self.cmp(lhs_low_bit, rhs_low_bit, ty, op);
+ _ = try self.cmp(lhs_high_bit, rhs_high_bit, ty, op);
+ _ = try self.cmp(lhs_high_bit, rhs_high_bit, ty, .eq);
try self.addTag(.select);
},
}
- const result = try self.allocLocal(Type.initTag(.i32));
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
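
For the ordered comparisons, `cmpBigInt` now pushes the low-limb comparison, the high-limb comparison and a high-limb equality check, and lets `select` pick between the first two. The decision that sequence is meant to encode, written as plain Zig (hypothetical helper, unsigned `<` case):

const std = @import("std");

// Equal high limbs defer to the low limbs; otherwise the high limbs decide.
fn lt128(lhs_lo: u64, lhs_hi: u64, rhs_lo: u64, rhs_hi: u64) bool {
    return if (lhs_hi == rhs_hi) lhs_lo < rhs_lo else lhs_hi < rhs_hi;
}

test "high limbs dominate, low limbs break ties" {
    try std.testing.expect(lt128(5, 1, 0, 2)); // (1 << 64) + 5 < 2 << 64
    try std.testing.expect(!lt128(5, 1, 4, 1)); // same high limb, 5 > 4
}
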
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4000,7 +4068,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const offset = if (layout.tag_align < layout.payload_align) blk: {
break :blk @intCast(u32, layout.payload_size);
} else @as(u32, 0);
- return self.load(operand, tag_ty, offset);
+ const tag = try self.load(operand, tag_ty, offset);
+ return tag.toLocal(self, tag_ty);
}
fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4010,19 +4079,20 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const dest_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
- return self.fpext(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ const extended = try self.fpext(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ return extended.toLocal(self, dest_ty);
}
+/// Extends a float from a given `Type` to a larger wanted `Type`
+/// NOTE: Leaves the result on the stack
fn fpext(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_bits = given.floatBits(self.target);
const wanted_bits = wanted.floatBits(self.target);
if (wanted_bits == 64 and given_bits == 32) {
- const result = try self.allocLocal(wanted);
try self.emitWValue(operand);
try self.addTag(.f64_promote_f32);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
} else if (given_bits == 16) {
// call __extendhfsf2(f16) f32
const f32_result = try self.callIntrinsic(
@@ -4036,11 +4106,9 @@ fn fpext(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WVa
return f32_result;
}
if (wanted_bits == 64) {
- const result = try self.allocLocal(wanted);
try self.emitWValue(f32_result);
try self.addTag(.f64_promote_f32);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
return self.fail("TODO: Implement 'fpext' for floats with bitsize: {d}", .{wanted_bits});
} else {
@@ -4055,26 +4123,25 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
- return self.fptrunc(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ const trunc = try self.fptrunc(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ return trunc.toLocal(self, dest_ty);
}
+/// Truncates a float from a given `Type` to its wanted `Type`
+/// NOTE: The result value remains on the stack
fn fptrunc(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_bits = given.floatBits(self.target);
const wanted_bits = wanted.floatBits(self.target);
if (wanted_bits == 32 and given_bits == 64) {
- const result = try self.allocLocal(wanted);
try self.emitWValue(operand);
try self.addTag(.f32_demote_f64);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
} else if (wanted_bits == 16) {
const op: WValue = if (given_bits == 64) blk: {
- const tmp = try self.allocLocal(Type.f32);
try self.emitWValue(operand);
try self.addTag(.f32_demote_f64);
- try self.addLabel(.local_set, tmp.local);
- break :blk tmp;
+ break :blk WValue{ .stack = {} };
} else operand;
// call __truncsfhf2(f32) f16
@@ -4159,12 +4226,9 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
switch (wasm_bits) {
128 => {
- const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
-
- try self.emitWValue(msb);
+ _ = try self.load(operand, Type.u64, 0);
try self.addTag(.i64_popcnt);
- try self.emitWValue(lsb);
+ _ = try self.load(operand, Type.u64, 8);
try self.addTag(.i64_popcnt);
try self.addTag(.i64_add);
try self.addTag(.i32_wrap_i64);
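
The 128-bit `@popCount` lowering is just the sum of the two limb popcounts, wrapped back to `i32`. A stand-alone equivalent (written with the typed builtin form this Zig era uses; later Zig versions drop the type argument):

const std = @import("std");

// Sum of the limb popcounts; the total always fits in 8 bits, so widening to
// u32 mirrors the final `i32.wrap_i64` in the lowering.
fn popCount128(lo: u64, hi: u64) u32 {
    return @as(u32, @popCount(u64, lo)) + @popCount(u64, hi);
}

test "counts bits of both limbs" {
    try std.testing.expectEqual(@as(u32, 128), popCount128(std.math.maxInt(u64), std.math.maxInt(u64)));
    try std.testing.expectEqual(@as(u32, 1), popCount128(0, 1));
}
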
@@ -4268,24 +4332,26 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!W
// for signed integers, we first apply signed shifts by the difference in bits
// to get the signed value, as we store it internally as 2's complement.
- const lhs = if (wasm_bits != int_info.bits and is_signed) blk: {
- break :blk try self.signAbsValue(lhs_op, lhs_ty);
+ var lhs = if (wasm_bits != int_info.bits and is_signed) blk: {
+ break :blk try (try self.signAbsValue(lhs_op, lhs_ty)).toLocal(self, lhs_ty);
} else lhs_op;
- const rhs = if (wasm_bits != int_info.bits and is_signed) blk: {
- break :blk try self.signAbsValue(rhs_op, lhs_ty);
+ var rhs = if (wasm_bits != int_info.bits and is_signed) blk: {
+ break :blk try (try self.signAbsValue(rhs_op, lhs_ty)).toLocal(self, lhs_ty);
} else rhs_op;
- const bin_op = try self.binOp(lhs, rhs, lhs_ty, op);
- const result = if (wasm_bits != int_info.bits) blk: {
- break :blk try self.wrapOperand(bin_op, lhs_ty);
+ var bin_op = try (try self.binOp(lhs, rhs, lhs_ty, op)).toLocal(self, lhs_ty);
+ defer bin_op.free(self);
+ var result = if (wasm_bits != int_info.bits) blk: {
+ break :blk try (try self.wrapOperand(bin_op, lhs_ty)).toLocal(self, lhs_ty);
} else bin_op;
+ defer result.free(self); // no-op when wasm_bits == int_info.bits
const cmp_op: std.math.CompareOperator = if (op == .sub) .gt else .lt;
const overflow_bit: WValue = if (is_signed) blk: {
if (wasm_bits == int_info.bits) {
const cmp_zero = try self.cmp(rhs, zero, lhs_ty, cmp_op);
const lt = try self.cmp(bin_op, lhs, lhs_ty, .lt);
- break :blk try self.binOp(cmp_zero, lt, Type.u32, .xor); // result of cmp_zero and lt is always 32bit
+ break :blk try self.binOp(cmp_zero, lt, Type.u32, .xor);
}
const abs = try self.signAbsValue(bin_op, lhs_ty);
break :blk try self.cmp(abs, bin_op, lhs_ty, .neq);
@@ -4293,11 +4359,22 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!W
try self.cmp(bin_op, lhs, lhs_ty, cmp_op)
else
try self.cmp(bin_op, result, lhs_ty, .neq);
+ var overflow_local = try overflow_bit.toLocal(self, Type.u32);
+ defer overflow_local.free(self);
const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
try self.store(result_ptr, result, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(self.target));
- try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+ try self.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+
+ // in this case, we performed a signAbsValue which created a temporary local
+ // so let's free this so it can be re-used instead.
+ // In the other case we do not want to free it, because that would free the
+ // resolved instructions which may be referenced by other instructions.
+ if (wasm_bits != int_info.bits and is_signed) {
+ lhs.free(self);
+ rhs.free(self);
+ }
return result_ptr;
}
@@ -4310,52 +4387,58 @@ fn airAddSubWithOverflowBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type,
return self.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
}
- const lhs_high_bit = try self.load(lhs, Type.u64, 0);
- const lhs_low_bit = try self.load(lhs, Type.u64, 8);
- const rhs_high_bit = try self.load(rhs, Type.u64, 0);
- const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer lhs_high_bit.free(self);
+ var lhs_low_bit = try (try self.load(lhs, Type.u64, 8)).toLocal(self, Type.u64);
+ defer lhs_low_bit.free(self);
+ var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer rhs_high_bit.free(self);
+ var rhs_low_bit = try (try self.load(rhs, Type.u64, 8)).toLocal(self, Type.u64);
+ defer rhs_low_bit.free(self);
- const low_op_res = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
- const high_op_res = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op);
+ var low_op_res = try (try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op)).toLocal(self, Type.u64);
+ defer low_op_res.free(self);
+ var high_op_res = try (try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(self, Type.u64);
+ defer high_op_res.free(self);
- const lt = if (op == .add) blk: {
- break :blk try self.cmp(high_op_res, lhs_high_bit, Type.u64, .lt);
+ var lt = if (op == .add) blk: {
+ break :blk try (try self.cmp(high_op_res, lhs_high_bit, Type.u64, .lt)).toLocal(self, Type.u32);
} else if (op == .sub) blk: {
- break :blk try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
+ break :blk try (try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt)).toLocal(self, Type.u32);
} else unreachable;
- const tmp = try self.intcast(lt, Type.u32, Type.u64);
- const tmp_op = try self.binOp(low_op_res, tmp, Type.u64, op);
+ defer lt.free(self);
+ var tmp = try (try self.intcast(lt, Type.u32, Type.u64)).toLocal(self, Type.u64);
+ defer tmp.free(self);
+ var tmp_op = try (try self.binOp(low_op_res, tmp, Type.u64, op)).toLocal(self, Type.u64);
+ defer tmp_op.free(self);
const overflow_bit = if (is_signed) blk: {
- const xor_op = try self.binOp(lhs_low_bit, tmp_op, Type.u64, .xor);
const xor_low = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
const to_wrap = if (op == .add) wrap: {
break :wrap try self.binOp(xor_low, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
} else xor_low;
+ const xor_op = try self.binOp(lhs_low_bit, tmp_op, Type.u64, .xor);
const wrap = try self.binOp(to_wrap, xor_op, Type.u64, .@"and");
break :blk try self.cmp(wrap, .{ .imm64 = 0 }, Type.i64, .lt); // i64 because signed
} else blk: {
- const eq = try self.cmp(tmp_op, lhs_low_bit, Type.u64, .eq);
- const op_eq = try self.cmp(tmp_op, lhs_low_bit, Type.u64, if (op == .add) .lt else .gt);
-
const first_arg = if (op == .sub) arg: {
break :arg try self.cmp(high_op_res, lhs_high_bit, Type.u64, .gt);
} else lt;
try self.emitWValue(first_arg);
- try self.emitWValue(op_eq);
- try self.emitWValue(eq);
+ _ = try self.cmp(tmp_op, lhs_low_bit, Type.u64, if (op == .add) .lt else .gt);
+ _ = try self.cmp(tmp_op, lhs_low_bit, Type.u64, .eq);
try self.addTag(.select);
- const overflow_bit = try self.allocLocal(Type.initTag(.u1));
- try self.addLabel(.local_set, overflow_bit.local);
- break :blk overflow_bit;
+ break :blk WValue{ .stack = {} };
};
+ var overflow_local = try overflow_bit.toLocal(self, Type.initTag(.u1));
+ defer overflow_local.free(self);
const result_ptr = try self.allocStack(result_ty);
try self.store(result_ptr, high_op_res, Type.u64, 0);
try self.store(result_ptr, tmp_op, Type.u64, 8);
- try self.store(result_ptr, overflow_bit, Type.initTag(.u1), 16);
+ try self.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
return result_ptr;
}
@@ -4377,24 +4460,31 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
};
- const shl = try self.binOp(lhs, rhs, lhs_ty, .shl);
- const result = if (wasm_bits != int_info.bits) blk: {
- break :blk try self.wrapOperand(shl, lhs_ty);
+ var shl = try (try self.binOp(lhs, rhs, lhs_ty, .shl)).toLocal(self, lhs_ty);
+ defer shl.free(self);
+ var result = if (wasm_bits != int_info.bits) blk: {
+ break :blk try (try self.wrapOperand(shl, lhs_ty)).toLocal(self, lhs_ty);
} else shl;
+ defer result.free(self); // it's a no-op to free the same local twice (when wasm_bits == int_info.bits)
const overflow_bit = if (wasm_bits != int_info.bits and is_signed) blk: {
+ // emit lhs to the stack so we can keep 'wrapped' on the stack also
+ try self.emitWValue(lhs);
const abs = try self.signAbsValue(shl, lhs_ty);
const wrapped = try self.wrapBinOp(abs, rhs, lhs_ty, .shr);
- break :blk try self.cmp(lhs, wrapped, lhs_ty, .neq);
+ break :blk try self.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq);
} else blk: {
+ try self.emitWValue(lhs);
const shr = try self.binOp(result, rhs, lhs_ty, .shr);
- break :blk try self.cmp(lhs, shr, lhs_ty, .neq);
+ break :blk try self.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
};
+ var overflow_local = try overflow_bit.toLocal(self, Type.initTag(.u1));
+ defer overflow_local.free(self);
const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
try self.store(result_ptr, result, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(self.target));
- try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+ try self.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
return result_ptr;
}
@@ -4412,7 +4502,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// We store the bit if it's overflowed or not in this. As it's zero-initialized
// we only need to update it if an overflow (or underflow) occurred.
- const overflow_bit = try self.allocLocal(Type.initTag(.u1));
+ var overflow_bit = try self.ensureAllocLocal(Type.initTag(.u1));
+ defer overflow_bit.free(self);
+
const int_info = lhs_ty.intInfo(self.target);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return self.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits});
@@ -4433,49 +4525,49 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const new_ty = if (int_info.signedness == .signed) Type.i64 else Type.u64;
const lhs_upcast = try self.intcast(lhs, lhs_ty, new_ty);
const rhs_upcast = try self.intcast(rhs, lhs_ty, new_ty);
- const bin_op = try self.binOp(lhs_upcast, rhs_upcast, new_ty, .mul);
+ const bin_op = try (try self.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(self, new_ty);
if (int_info.signedness == .unsigned) {
const shr = try self.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
const wrap = try self.intcast(shr, new_ty, lhs_ty);
- const cmp_res = try self.cmp(wrap, zero, lhs_ty, .neq);
- try self.emitWValue(cmp_res);
+ _ = try self.cmp(wrap, zero, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk try self.intcast(bin_op, new_ty, lhs_ty);
} else {
- const down_cast = try self.intcast(bin_op, new_ty, lhs_ty);
- const shr = try self.binOp(down_cast, .{ .imm32 = int_info.bits - 1 }, lhs_ty, .shr);
+ const down_cast = try (try self.intcast(bin_op, new_ty, lhs_ty)).toLocal(self, lhs_ty);
+ var shr = try (try self.binOp(down_cast, .{ .imm32 = int_info.bits - 1 }, lhs_ty, .shr)).toLocal(self, lhs_ty);
+ defer shr.free(self);
const shr_res = try self.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
const down_shr_res = try self.intcast(shr_res, new_ty, lhs_ty);
- const cmp_res = try self.cmp(down_shr_res, shr, lhs_ty, .neq);
- try self.emitWValue(cmp_res);
+ _ = try self.cmp(down_shr_res, shr, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk down_cast;
}
} else if (int_info.signedness == .signed) blk: {
const lhs_abs = try self.signAbsValue(lhs, lhs_ty);
const rhs_abs = try self.signAbsValue(rhs, lhs_ty);
- const bin_op = try self.binOp(lhs_abs, rhs_abs, lhs_ty, .mul);
+ const bin_op = try (try self.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(self, lhs_ty);
const mul_abs = try self.signAbsValue(bin_op, lhs_ty);
- const cmp_op = try self.cmp(mul_abs, bin_op, lhs_ty, .neq);
- try self.emitWValue(cmp_op);
+ _ = try self.cmp(mul_abs, bin_op, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk try self.wrapOperand(bin_op, lhs_ty);
} else blk: {
- const bin_op = try self.binOp(lhs, rhs, lhs_ty, .mul);
+ var bin_op = try (try self.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(self, lhs_ty);
+ defer bin_op.free(self);
const shift_imm = if (wasm_bits == 32)
WValue{ .imm32 = int_info.bits }
else
WValue{ .imm64 = int_info.bits };
const shr = try self.binOp(bin_op, shift_imm, lhs_ty, .shr);
- const cmp_op = try self.cmp(shr, zero, lhs_ty, .neq);
- try self.emitWValue(cmp_op);
+ _ = try self.cmp(shr, zero, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk try self.wrapOperand(bin_op, lhs_ty);
};
+ var bin_op_local = try bin_op.toLocal(self, lhs_ty);
+ defer bin_op_local.free(self);
const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
- try self.store(result_ptr, bin_op, lhs_ty, 0);
+ try self.store(result_ptr, bin_op_local, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(self.target));
try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
@@ -4497,12 +4589,10 @@ fn airMaxMin(self: *Self, inst: Air.Inst.Index, op: enum { max, min }) InnerErro
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const cmp_result = try self.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
-
// operands to select from
try self.lowerToStack(lhs);
try self.lowerToStack(rhs);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
// based on the result from comparison, return operand 0 or 1.
try self.addTag(.select);
@@ -4528,21 +4618,22 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const rhs = try self.resolveInst(bin_op.rhs);
if (ty.floatBits(self.target) == 16) {
- const addend_ext = try self.fpext(addend, ty, Type.f32);
- const lhs_ext = try self.fpext(lhs, ty, Type.f32);
const rhs_ext = try self.fpext(rhs, ty, Type.f32);
+ const lhs_ext = try self.fpext(lhs, ty, Type.f32);
+ const addend_ext = try self.fpext(addend, ty, Type.f32);
// call to compiler-rt `fn fmaf(f32, f32, f32) f32`
- const result = try self.callIntrinsic(
+ var result = try self.callIntrinsic(
"fmaf",
&.{ Type.f32, Type.f32, Type.f32 },
Type.f32,
&.{ rhs_ext, lhs_ext, addend_ext },
);
- return try self.fptrunc(result, Type.f32, ty);
+ defer result.free(self);
+ return try (try self.fptrunc(result, Type.f32, ty)).toLocal(self, ty);
}
const mul_result = try self.binOp(lhs, rhs, ty, .mul);
- return self.binOp(mul_result, addend, ty, .add);
+ return (try self.binOp(mul_result, addend, ty, .add)).toLocal(self, ty);
}
fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4571,17 +4662,16 @@ fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_wrap_i64);
},
128 => {
- const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
- const neq = try self.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
+ var lsb = try (try self.load(operand, Type.u64, 8)).toLocal(self, Type.u64);
+ defer lsb.free(self);
try self.emitWValue(lsb);
try self.addTag(.i64_clz);
- try self.emitWValue(msb);
+ _ = try self.load(operand, Type.u64, 0);
try self.addTag(.i64_clz);
try self.emitWValue(.{ .imm64 = 64 });
try self.addTag(.i64_add);
- try self.emitWValue(neq);
+ _ = try self.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
try self.addTag(.select);
try self.addTag(.i32_wrap_i64);
},
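
The 128-bit `@clz` counts the leading zeros of both halves, adds 64 to the count coming from the lower half, and selects between the two results depending on whether the upper half is non-zero. A stand-alone sketch of the value it computes (typed `@clz` form, as used by this Zig era):

const std = @import("std");

// What the 128-bit lowering computes, with the value split into two limbs.
fn clz128(lo: u64, hi: u64) u32 {
    if (hi != 0) return @clz(u64, hi);
    return 64 + @as(u32, @clz(u64, lo));
}

test "leading zeros across the limb boundary" {
    try std.testing.expectEqual(@as(u32, 0), clz128(0, 1 << 63));
    try std.testing.expectEqual(@as(u32, 127), clz128(1, 0));
    try std.testing.expectEqual(@as(u32, 128), clz128(0, 0));
}
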
@@ -4618,28 +4708,27 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
32 => {
if (wasm_bits != int_info.bits) {
const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits);
- const bin_op = try self.binOp(operand, .{ .imm32 = val }, ty, .@"or");
- try self.emitWValue(bin_op);
+ // leave value on the stack
+ _ = try self.binOp(operand, .{ .imm32 = val }, ty, .@"or");
} else try self.emitWValue(operand);
try self.addTag(.i32_ctz);
},
64 => {
if (wasm_bits != int_info.bits) {
const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits);
- const bin_op = try self.binOp(operand, .{ .imm64 = val }, ty, .@"or");
- try self.emitWValue(bin_op);
+ // leave value on the stack
+ _ = try self.binOp(operand, .{ .imm64 = val }, ty, .@"or");
} else try self.emitWValue(operand);
try self.addTag(.i64_ctz);
try self.addTag(.i32_wrap_i64);
},
128 => {
- const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
- const neq = try self.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
+ var msb = try (try self.load(operand, Type.u64, 0)).toLocal(self, Type.u64);
+ defer msb.free(self);
try self.emitWValue(msb);
try self.addTag(.i64_ctz);
- try self.emitWValue(lsb);
+ _ = try self.load(operand, Type.u64, 8);
if (wasm_bits != int_info.bits) {
try self.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64));
try self.addTag(.i64_or);
@@ -4651,7 +4740,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
} else {
try self.addTag(.i64_add);
}
- try self.emitWValue(neq);
+ _ = try self.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
try self.addTag(.select);
try self.addTag(.i32_wrap_i64);
},
@@ -4777,7 +4866,8 @@ fn lowerTry(
if (isByRef(pl_ty, self.target)) {
return buildPointerOffset(self, err_union, pl_offset, .new);
}
- return self.load(err_union, pl_ty, pl_offset);
+ const payload = try self.load(err_union, pl_ty, pl_offset);
+ return payload.toLocal(self, pl_ty);
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4807,11 +4897,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const res = if (int_info.signedness == .signed) blk: {
break :blk try self.wrapOperand(shr_res, Type.u8);
} else shr_res;
- return self.binOp(lhs, res, ty, .@"or");
+ return (try self.binOp(lhs, res, ty, .@"or")).toLocal(self, ty);
},
24 => {
- const msb = try self.wrapOperand(operand, Type.u16);
- const lsb = try self.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr);
+ var msb = try (try self.wrapOperand(operand, Type.u16)).toLocal(self, Type.u16);
+ defer msb.free(self);
const shl_res = try self.binOp(msb, .{ .imm32 = 8 }, Type.u16, .shl);
const lhs = try self.binOp(shl_res, .{ .imm32 = 0xFF0000 }, Type.u16, .@"and");
@@ -4825,22 +4915,26 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const rhs_wrap = try self.wrapOperand(msb, Type.u8);
const rhs_result = try self.binOp(rhs_wrap, .{ .imm32 = 16 }, ty, .shl);
+ const lsb = try self.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr);
const tmp = try self.binOp(lhs_result, rhs_result, ty, .@"or");
- return self.binOp(tmp, lsb, ty, .@"or");
+ return (try self.binOp(tmp, lsb, ty, .@"or")).toLocal(self, ty);
},
32 => {
const shl_tmp = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shl);
- const lhs = try self.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, ty, .@"and");
+ var lhs = try (try self.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, ty, .@"and")).toLocal(self, ty);
+ defer lhs.free(self);
const shr_tmp = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shr);
- const rhs = try self.binOp(shr_tmp, .{ .imm32 = 0xFF00FF }, ty, .@"and");
- const tmp_or = try self.binOp(lhs, rhs, ty, .@"or");
+ var rhs = try (try self.binOp(shr_tmp, .{ .imm32 = 0xFF00FF }, ty, .@"and")).toLocal(self, ty);
+ defer rhs.free(self);
+ var tmp_or = try (try self.binOp(lhs, rhs, ty, .@"or")).toLocal(self, ty);
+ defer tmp_or.free(self);
const shl = try self.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shl);
const shr = try self.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shr);
const res = if (int_info.signedness == .signed) blk: {
break :blk try self.wrapOperand(shr, Type.u16);
} else shr;
- return self.binOp(shl, res, ty, .@"or");
+ return (try self.binOp(shl, res, ty, .@"or")).toLocal(self, ty);
},
else => return self.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
}
@@ -4857,7 +4951,7 @@ fn airDiv(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (ty.isSignedInt()) {
return self.divSigned(lhs, rhs, ty);
}
- return self.binOp(lhs, rhs, ty, .div);
+ return (try self.binOp(lhs, rhs, ty, .div)).toLocal(self, ty);
}
fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4869,33 +4963,31 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const rhs = try self.resolveInst(bin_op.rhs);
if (ty.isUnsignedInt()) {
- return self.binOp(lhs, rhs, ty, .div);
+ return (try self.binOp(lhs, rhs, ty, .div)).toLocal(self, ty);
} else if (ty.isSignedInt()) {
const int_bits = ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return self.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
};
const lhs_res = if (wasm_bits != int_bits) blk: {
- break :blk try self.signAbsValue(lhs, ty);
+ break :blk try (try self.signAbsValue(lhs, ty)).toLocal(self, ty);
} else lhs;
const rhs_res = if (wasm_bits != int_bits) blk: {
- break :blk try self.signAbsValue(rhs, ty);
+ break :blk try (try self.signAbsValue(rhs, ty)).toLocal(self, ty);
} else rhs;
- const div_result = try self.binOp(lhs_res, rhs_res, ty, .div);
- const rem_result = try self.binOp(lhs_res, rhs_res, ty, .rem);
-
const zero = switch (wasm_bits) {
32 => WValue{ .imm32 = 0 },
64 => WValue{ .imm64 = 0 },
else => unreachable,
};
- const lhs_less_than_zero = try self.cmp(lhs_res, zero, ty, .lt);
- const rhs_less_than_zero = try self.cmp(rhs_res, zero, ty, .lt);
- try self.emitWValue(div_result);
- try self.emitWValue(lhs_less_than_zero);
- try self.emitWValue(rhs_less_than_zero);
+ const div_result = try self.allocLocal(ty);
+ // leave on stack
+ _ = try self.binOp(lhs_res, rhs_res, ty, .div);
+ try self.addLabel(.local_tee, div_result.local);
+ _ = try self.cmp(lhs_res, zero, ty, .lt);
+ _ = try self.cmp(rhs_res, zero, ty, .lt);
switch (wasm_bits) {
32 => {
try self.addTag(.i32_xor);
@@ -4908,7 +5000,8 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
else => unreachable,
}
try self.emitWValue(div_result);
- try self.emitWValue(rem_result);
+ // leave value on the stack
+ _ = try self.binOp(lhs_res, rhs_res, ty, .rem);
try self.addTag(.select);
} else {
const float_bits = ty.floatBits(self.target);
@@ -4940,9 +5033,7 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
if (is_f16) {
- // we can re-use temporary local
- try self.addLabel(.local_set, lhs_operand.local);
- return self.fptrunc(lhs_operand, Type.f32, Type.f16);
+ _ = try self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
}
}
@@ -4962,10 +5053,9 @@ fn divSigned(self: *Self, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue
}
if (wasm_bits != int_bits) {
- const lhs_abs = try self.signAbsValue(lhs, ty);
- const rhs_abs = try self.signAbsValue(rhs, ty);
- try self.emitWValue(lhs_abs);
- try self.emitWValue(rhs_abs);
+ // Leave both values on the stack
+ _ = try self.signAbsValue(lhs, ty);
+ _ = try self.signAbsValue(rhs, ty);
} else {
try self.emitWValue(lhs);
try self.emitWValue(rhs);
@@ -4977,6 +5067,8 @@ fn divSigned(self: *Self, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue
return result;
}
+/// Retrieves the absolute value of a signed integer
+/// NOTE: Leaves the result value on the stack.
fn signAbsValue(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
const int_bits = ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
@@ -5005,9 +5097,8 @@ fn signAbsValue(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
},
else => unreachable,
}
- const result = try self.allocLocal(ty);
- try self.addLabel(.local_set, result.local);
- return result;
+
+ return WValue{ .stack = {} };
}
fn airCeilFloorTrunc(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
@@ -5034,9 +5125,7 @@ fn airCeilFloorTrunc(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValu
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
if (is_f16) {
- // re-use temporary to save locals
- try self.addLabel(.local_set, op_to_lower.local);
- return self.fptrunc(op_to_lower, Type.f32, Type.f16);
+ _ = try self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
}
const result = try self.allocLocal(ty);
@@ -5065,7 +5154,8 @@ fn airSatBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
}
const wasm_bits = toWasmBits(int_info.bits).?;
- const bin_result = try self.binOp(lhs, rhs, ty, op);
+ var bin_result = try (try self.binOp(lhs, rhs, ty, op)).toLocal(self, ty);
+ defer bin_result.free(self);
if (wasm_bits != int_info.bits and op == .add) {
const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1);
const imm_val = switch (wasm_bits) {
@@ -5074,19 +5164,17 @@ fn airSatBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
else => unreachable,
};
- const cmp_result = try self.cmp(bin_result, imm_val, ty, .lt);
try self.emitWValue(bin_result);
try self.emitWValue(imm_val);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(bin_result, imm_val, ty, .lt);
} else {
- const cmp_result = try self.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
switch (wasm_bits) {
32 => try self.addImm32(if (op == .add) @as(i32, -1) else 0),
64 => try self.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0),
else => unreachable,
}
try self.emitWValue(bin_result);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
}
try self.addTag(.select);
@@ -5100,8 +5188,12 @@ fn signedSat(self: *Self, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
- const lhs = if (!is_wasm_bits) try self.signAbsValue(lhs_operand, ty) else lhs_operand;
- const rhs = if (!is_wasm_bits) try self.signAbsValue(rhs_operand, ty) else rhs_operand;
+ var lhs = if (!is_wasm_bits) lhs: {
+ break :lhs try (try self.signAbsValue(lhs_operand, ty)).toLocal(self, ty);
+ } else lhs_operand;
+ var rhs = if (!is_wasm_bits) rhs: {
+ break :rhs try (try self.signAbsValue(rhs_operand, ty)).toLocal(self, ty);
+ } else rhs_operand;
const max_val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1);
const min_val: i64 = (-@intCast(i64, @intCast(u63, max_val))) - 1;
@@ -5116,38 +5208,38 @@ fn signedSat(self: *Self, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op
else => unreachable,
};
- const bin_result = try self.binOp(lhs, rhs, ty, op);
+ var bin_result = try (try self.binOp(lhs, rhs, ty, op)).toLocal(self, ty);
if (!is_wasm_bits) {
- const cmp_result_lt = try self.cmp(bin_result, max_wvalue, ty, .lt);
+ defer bin_result.free(self); // not returned in this branch
+ defer lhs.free(self); // uses temporary local for absvalue
+ defer rhs.free(self); // uses temporary local for absvalue
try self.emitWValue(bin_result);
try self.emitWValue(max_wvalue);
- try self.emitWValue(cmp_result_lt);
+ _ = try self.cmp(bin_result, max_wvalue, ty, .lt);
try self.addTag(.select);
try self.addLabel(.local_set, bin_result.local); // re-use local
- const cmp_result_gt = try self.cmp(bin_result, min_wvalue, ty, .gt);
try self.emitWValue(bin_result);
try self.emitWValue(min_wvalue);
- try self.emitWValue(cmp_result_gt);
+ _ = try self.cmp(bin_result, min_wvalue, ty, .gt);
try self.addTag(.select);
try self.addLabel(.local_set, bin_result.local); // re-use local
- return self.wrapOperand(bin_result, ty);
+ return (try self.wrapOperand(bin_result, ty)).toLocal(self, ty);
} else {
const zero = switch (wasm_bits) {
32 => WValue{ .imm32 = 0 },
64 => WValue{ .imm64 = 0 },
else => unreachable,
};
- const cmp_bin_result = try self.cmp(bin_result, lhs, ty, .lt);
- const cmp_zero_result = try self.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
- const xor = try self.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
- const cmp_bin_zero_result = try self.cmp(bin_result, zero, ty, .lt);
try self.emitWValue(max_wvalue);
try self.emitWValue(min_wvalue);
- try self.emitWValue(cmp_bin_zero_result);
+ _ = try self.cmp(bin_result, zero, ty, .lt);
try self.addTag(.select);
try self.emitWValue(bin_result);
- try self.emitWValue(xor);
+ // leave on stack
+ const cmp_zero_result = try self.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
+ const cmp_bin_result = try self.cmp(bin_result, lhs, ty, .lt);
+ _ = try self.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
try self.addTag(.select);
try self.addLabel(.local_set, bin_result.local); // re-use local
return bin_result;
@@ -5171,9 +5263,10 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const result = try self.allocLocal(ty);
if (wasm_bits == int_info.bits) {
- const shl = try self.binOp(lhs, rhs, ty, .shl);
- const shr = try self.binOp(shl, rhs, ty, .shr);
- const cmp_result = try self.cmp(lhs, shr, ty, .neq);
+ var shl = try (try self.binOp(lhs, rhs, ty, .shl)).toLocal(self, ty);
+ defer shl.free(self);
+ var shr = try (try self.binOp(shl, rhs, ty, .shr)).toLocal(self, ty);
+ defer shr.free(self);
switch (wasm_bits) {
32 => blk: {
@@ -5181,10 +5274,9 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addImm32(-1);
break :blk;
}
- const less_than_zero = try self.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
try self.addImm32(std.math.minInt(i32));
try self.addImm32(std.math.maxInt(i32));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
try self.addTag(.select);
},
64 => blk: {
@@ -5192,16 +5284,15 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addImm64(@bitCast(u64, @as(i64, -1)));
break :blk;
}
- const less_than_zero = try self.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
try self.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
try self.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
try self.addTag(.select);
},
else => unreachable,
}
try self.emitWValue(shl);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(lhs, shr, ty, .neq);
try self.addTag(.select);
try self.addLabel(.local_set, result.local);
return result;
@@ -5213,10 +5304,12 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
else => unreachable,
};
- const shl_res = try self.binOp(lhs, shift_value, ty, .shl);
- const shl = try self.binOp(shl_res, rhs, ty, .shl);
- const shr = try self.binOp(shl, rhs, ty, .shr);
- const cmp_result = try self.cmp(shl_res, shr, ty, .neq);
+ var shl_res = try (try self.binOp(lhs, shift_value, ty, .shl)).toLocal(self, ty);
+ defer shl_res.free(self);
+ var shl = try (try self.binOp(shl_res, rhs, ty, .shl)).toLocal(self, ty);
+ defer shl.free(self);
+ var shr = try (try self.binOp(shl, rhs, ty, .shr)).toLocal(self, ty);
+ defer shr.free(self);
switch (wasm_bits) {
32 => blk: {
@@ -5225,10 +5318,9 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
break :blk;
}
- const less_than_zero = try self.cmp(shl_res, .{ .imm32 = 0 }, ty, .lt);
try self.addImm32(std.math.minInt(i32));
try self.addImm32(std.math.maxInt(i32));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(shl_res, .{ .imm32 = 0 }, ty, .lt);
try self.addTag(.select);
},
64 => blk: {
@@ -5237,29 +5329,30 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
break :blk;
}
- const less_than_zero = try self.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
try self.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
try self.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
try self.addTag(.select);
},
else => unreachable,
}
try self.emitWValue(shl);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(shl_res, shr, ty, .neq);
try self.addTag(.select);
try self.addLabel(.local_set, result.local);
- const shift_result = try self.binOp(result, shift_value, ty, .shr);
+ var shift_result = try self.binOp(result, shift_value, ty, .shr);
if (is_signed) {
- return self.wrapOperand(shift_result, ty);
+ shift_result = try self.wrapOperand(shift_result, ty);
}
- return shift_result;
+ return shift_result.toLocal(self, ty);
}
}
/// Calls a compiler-rt intrinsic by creating an undefined symbol,
/// then lowering the arguments and calling the symbol as a function call.
/// This function call assumes the C-ABI.
+/// Asserts arguments are not stack values when the return value is
+/// passed as the first parameter.
fn callIntrinsic(
self: *Self,
name: []const u8,
@@ -5289,6 +5382,7 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args) |arg, arg_i| {
+ assert(!(want_sret_param and arg == .stack));
assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
try self.lowerArg(.C, param_types[arg_i], arg);
}
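
The reworked `airDivFloor` lowering above leans on the standard identity between floored and truncated signed division. The following is a minimal stand-alone sketch of that identity in ordinary Zig, not the backend's actual instruction sequence, and the helper name is made up for illustration:

```zig
const std = @import("std");

// Floored division via truncated division: subtract one when the operand
// signs differ and the division is inexact.
fn divFloorViaTrunc(a: i32, b: i32) i32 {
    const q = @divTrunc(a, b);
    const r = @rem(a, b);
    if ((a < 0) != (b < 0) and r != 0) return q - 1;
    return q;
}

test "agrees with @divFloor" {
    try std.testing.expectEqual(@divFloor(@as(i32, -7), 2), divFloorViaTrunc(-7, 2));
    try std.testing.expectEqual(@divFloor(@as(i32, 7), -2), divFloorViaTrunc(7, -2));
    try std.testing.expectEqual(@divFloor(@as(i32, -8), 2), divFloorViaTrunc(-8, 2));
}
```

The wasm lowering encodes the same sign-mismatch/nonzero-remainder test with `xor` and `select`, keeping intermediate values on the wasm stack instead of spilling them to locals.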
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 7f7473bc66..0f6b6baee3 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -776,6 +776,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
=> return self.fail("TODO implement optimized float mode", .{}),
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -4370,6 +4371,7 @@ fn genVarDbgInfo(
.dwarf => |dw| {
const dbg_info = &dw.dbg_info;
try dbg_info.append(@enumToInt(link.File.Dwarf.AbbrevKind.variable));
+ const endian = self.target.cpu.arch.endian();
switch (mcv) {
.register => |reg| {
@@ -4390,7 +4392,6 @@ fn genVarDbgInfo(
dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
},
.memory, .got_load, .direct_load => {
- const endian = self.target.cpu.arch.endian();
const ptr_width = @intCast(u8, @divExact(self.target.cpu.arch.ptrBitWidth(), 8));
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
@@ -4425,7 +4426,53 @@ fn genVarDbgInfo(
else => {},
}
},
+ .immediate => |x| {
+ const signedness: std.builtin.Signedness = blk: {
+ if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+ break :blk ty.intInfo(self.target.*).signedness;
+ };
+ try dbg_info.ensureUnusedCapacity(2);
+ const fixup = dbg_info.items.len;
+ dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
+ 1,
+ switch (signedness) {
+ .signed => DW.OP.consts,
+ .unsigned => DW.OP.constu,
+ },
+ });
+ switch (signedness) {
+ .signed => try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x)),
+ .unsigned => try leb128.writeULEB128(dbg_info.writer(), x),
+ }
+ try dbg_info.append(DW.OP.stack_value);
+ dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
+ },
+ .undef => {
+ // DW.AT.location, DW.FORM.exprloc
+ // uleb128(exprloc_len)
+ // DW.OP.implicit_value uleb128(len_of_bytes) bytes
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ var implicit_value_len = std.ArrayList(u8).init(self.gpa);
+ defer implicit_value_len.deinit();
+ try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
+ const total_exprloc_len = 1 + implicit_value_len.items.len + abi_size;
+ try leb128.writeULEB128(dbg_info.writer(), total_exprloc_len);
+ try dbg_info.ensureUnusedCapacity(total_exprloc_len);
+ dbg_info.appendAssumeCapacity(DW.OP.implicit_value);
+ dbg_info.appendSliceAssumeCapacity(implicit_value_len.items);
+ dbg_info.appendNTimesAssumeCapacity(0xaa, abi_size);
+ },
+ .none => {
+ try dbg_info.ensureUnusedCapacity(3);
+ dbg_info.appendSliceAssumeCapacity(&[3]u8{ // DW.AT.location, DW.FORM.exprloc
+ 2, DW.OP.lit0, DW.OP.stack_value,
+ });
+ },
else => {
+ try dbg_info.ensureUnusedCapacity(2);
+ dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
+ 1, DW.OP.nop,
+ });
log.debug("TODO generate debug info for {}", .{mcv});
},
}
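
The new `.immediate` case above chooses `DW.OP.consts` or `DW.OP.constu` from the operand's signedness and then LEB128-encodes the value. A small sketch of the underlying `std.leb` round-trip, assuming the standard-library API of this era:

```zig
const std = @import("std");

// Signed immediates are written as SLEB128 (DW.OP.consts), unsigned ones as
// ULEB128 (DW.OP.constu). Round-trip a negative value to show the encoding.
test "sleb128 round-trips a negative immediate" {
    var buf: [16]u8 = undefined;
    var out = std.io.fixedBufferStream(&buf);
    try std.leb.writeILEB128(out.writer(), @as(i64, -42));
    var in = std.io.fixedBufferStream(out.getWritten());
    try std.testing.expectEqual(@as(i64, -42), try std.leb.readILEB128(i64, in.reader()));
}
```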
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index d4320b1619..76e687c7d6 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -33,7 +33,14 @@ flagpd1("H"),
.psl = false,
},
flagpd1("I-"),
-flagpd1("M"),
+.{
+ .name = "M",
+ .syntax = .flag,
+ .zig_equivalent = .dep_file_to_stdout,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
.{
.name = "MD",
.syntax = .flag,
@@ -53,7 +60,7 @@ flagpd1("M"),
.{
.name = "MM",
.syntax = .flag,
- .zig_equivalent = .dep_file_mm,
+ .zig_equivalent = .dep_file_to_stdout,
.pd1 = true,
.pd2 = false,
.psl = false,
@@ -1983,7 +1990,7 @@ flagpsl("MT"),
.{
.name = "user-dependencies",
.syntax = .flag,
- .zig_equivalent = .dep_file_mm,
+ .zig_equivalent = .dep_file_to_stdout,
.pd1 = false,
.pd2 = true,
.psl = false,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 74e4404bce..81a892183f 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1954,6 +1954,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
=> return f.fail("TODO implement optimized float mode", .{}),
.is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
+ .error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
// zig fmt: on
};
switch (result_value) {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 9c3efa18cd..0586c99432 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -722,6 +722,10 @@ pub const Object = struct {
dg.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
}
+ if (decl.@"linksection") |section| {
+ llvm_func.setSection(section);
+ }
+
// Remove all the basic blocks of a function in order to start over, generating
// LLVM IR from an empty function body.
while (llvm_func.getFirstBasicBlock()) |bb| {
@@ -917,6 +921,40 @@ pub const Object = struct {
};
try args.append(loaded);
},
+ .multiple_llvm_float => {
+ const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
+ const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_llvm_ty = try dg.lowerType(param_ty);
+ const param_alignment = param_ty.abiAlignment(target);
+ const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
+ arg_ptr.setAlignment(param_alignment);
+ var field_types_buf: [8]*const llvm.Type = undefined;
+ const field_types = field_types_buf[0..llvm_floats.len];
+ for (llvm_floats) |float_bits, i| {
+ switch (float_bits) {
+ 64 => field_types[i] = dg.context.doubleType(),
+ 80 => field_types[i] = dg.context.x86FP80Type(),
+ else => {},
+ }
+ }
+ const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+ const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+ for (llvm_floats) |_, i_usize| {
+ const i = @intCast(c_uint, i_usize);
+ const param = llvm_func.getParam(i);
+ const field_ptr = builder.buildStructGEP(casted_ptr, i, "");
+ const store_inst = builder.buildStore(param, field_ptr);
+ store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+ }
+
+ const is_by_ref = isByRef(param_ty);
+ const loaded = if (is_by_ref) arg_ptr else l: {
+ const load_inst = builder.buildLoad(arg_ptr, "");
+ load_inst.setAlignment(param_alignment);
+ break :l load_inst;
+ };
+ try args.append(loaded);
+ },
.as_u16 => {
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1107,6 +1145,11 @@ pub const Object = struct {
.hidden => llvm_global.setVisibility(.Hidden),
.protected => llvm_global.setVisibility(.Protected),
}
+ if (exports[0].options.section) |section| {
+ const section_z = try module.gpa.dupeZ(u8, section);
+ defer module.gpa.free(section_z);
+ llvm_global.setSection(section_z);
+ }
if (decl.val.castTag(.variable)) |variable| {
if (variable.data.is_threadlocal) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
@@ -1683,8 +1726,7 @@ pub const Object = struct {
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
if (struct_obj.layout == .Packed) {
- var buf: Type.Payload.Bits = undefined;
- const info = struct_obj.packedIntegerType(target, &buf).intInfo(target);
+ const info = struct_obj.backing_int_ty.intInfo(target);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
@@ -2184,6 +2226,7 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
var global = try dg.resolveGlobalDecl(decl_index);
global.setAlignment(decl.getAlignment(target));
+ if (decl.@"linksection") |section| global.setSection(section);
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
const variable = payload.data;
@@ -2217,6 +2260,7 @@ pub const DeclGen = struct {
new_global.setLinkage(global.getLinkage());
new_global.setUnnamedAddr(global.getUnnamedAddress());
new_global.setAlignment(global.getAlignment());
+ if (decl.@"linksection") |section| new_global.setSection(section);
new_global.setInitializer(llvm_init);
// replaceAllUsesWith requires the type to be unchanged. So we bitcast
// the new global to the old type and use that as the thing to replace
@@ -2331,6 +2375,14 @@ pub const DeclGen = struct {
dg.addFnAttr(llvm_fn, "noreturn");
}
+ var llvm_arg_i = @as(c_uint, @boolToInt(sret)) + @boolToInt(err_return_tracing);
+ var it = iterateParamTypes(dg, fn_info);
+ while (it.next()) |_| : (llvm_arg_i += 1) {
+ if (!it.byval_attr) continue;
+ const param = llvm_fn.getParam(llvm_arg_i);
+ llvm_fn.addByValAttr(llvm_arg_i, param.typeOf().getElementType());
+ }
+
return llvm_fn;
}
@@ -2679,9 +2731,7 @@ pub const DeclGen = struct {
const struct_obj = t.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
- var buf: Type.Payload.Bits = undefined;
- const int_ty = struct_obj.packedIntegerType(target, &buf);
- const int_llvm_ty = try dg.lowerType(int_ty);
+ const int_llvm_ty = try dg.lowerType(struct_obj.backing_int_ty);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
}
@@ -2886,6 +2936,18 @@ pub const DeclGen = struct {
llvm_params.appendAssumeCapacity(big_int_ty);
}
},
+ .multiple_llvm_float => {
+ const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
+ try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
+ for (llvm_ints) |float_bits| {
+ const float_ty = switch (float_bits) {
+ 64 => dg.context.doubleType(),
+ 80 => dg.context.x86FP80Type(),
+ else => unreachable,
+ };
+ llvm_params.appendAssumeCapacity(float_ty);
+ }
+ },
.as_u16 => {
try llvm_params.append(dg.context.intType(16));
},
@@ -3330,8 +3392,8 @@ pub const DeclGen = struct {
const struct_obj = tv.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
- const big_bits = struct_obj.packedIntegerBits(target);
- const int_llvm_ty = dg.context.intType(big_bits);
+ const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *const llvm.Value = int_llvm_ty.constNull();
@@ -4185,6 +4247,7 @@ pub const FuncGen = struct {
.prefetch => try self.airPrefetch(inst),
.is_named_enum_value => try self.airIsNamedEnumValue(inst),
+ .error_set_has_value => try self.airErrorSetHasValue(inst),
.reduce => try self.airReduce(inst, false),
.reduce_optimized => try self.airReduce(inst, true),
@@ -4394,6 +4457,39 @@ pub const FuncGen = struct {
llvm_args.appendAssumeCapacity(load_inst);
}
},
+ .multiple_llvm_float => {
+ const arg = args[it.zig_index - 1];
+ const param_ty = self.air.typeOf(arg);
+ const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
+ const llvm_arg = try self.resolveInst(arg);
+ const is_by_ref = isByRef(param_ty);
+ const arg_ptr = if (is_by_ref) llvm_arg else p: {
+ const p = self.buildAlloca(llvm_arg.typeOf());
+ const store_inst = self.builder.buildStore(llvm_arg, p);
+ store_inst.setAlignment(param_ty.abiAlignment(target));
+ break :p p;
+ };
+
+ var field_types_buf: [8]*const llvm.Type = undefined;
+ const field_types = field_types_buf[0..llvm_floats.len];
+ for (llvm_floats) |float_bits, i| {
+ switch (float_bits) {
+ 64 => field_types[i] = self.dg.context.doubleType(),
+ 80 => field_types[i] = self.dg.context.x86FP80Type(),
+ else => {},
+ }
+ }
+ const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+ const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+ try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
+ for (llvm_floats) |_, i_usize| {
+ const i = @intCast(c_uint, i_usize);
+ const field_ptr = self.builder.buildStructGEP(casted_ptr, i, "");
+ const load_inst = self.builder.buildLoad(field_ptr, "");
+ load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+ llvm_args.appendAssumeCapacity(load_inst);
+ }
+ },
.as_u16 => {
const arg = args[it.zig_index - 1];
const llvm_arg = try self.resolveInst(arg);
@@ -7888,6 +7984,53 @@ pub const FuncGen = struct {
}
}
+ fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const error_set_ty = self.air.getRefType(ty_op.ty);
+
+ const names = error_set_ty.errorSetNames();
+ const valid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Valid");
+ const invalid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Invalid");
+ const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
+ const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
+
+ for (names) |name| {
+ const err_int = self.dg.module.global_error_set.get(name).?;
+ const this_tag_int_value = int: {
+ var tag_val_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = err_int,
+ };
+ break :int try self.dg.lowerValue(.{
+ .ty = Type.err_int,
+ .val = Value.initPayload(&tag_val_payload.base),
+ });
+ };
+ switch_instr.addCase(this_tag_int_value, valid_block);
+ }
+ self.builder.positionBuilderAtEnd(valid_block);
+ _ = self.builder.buildBr(end_block);
+
+ self.builder.positionBuilderAtEnd(invalid_block);
+ _ = self.builder.buildBr(end_block);
+
+ self.builder.positionBuilderAtEnd(end_block);
+
+ const llvm_type = self.dg.context.intType(1);
+ const incoming_values: [2]*const llvm.Value = .{
+ llvm_type.constInt(1, .False), llvm_type.constInt(0, .False),
+ };
+ const incoming_blocks: [2]*const llvm.BasicBlock = .{
+ valid_block, invalid_block,
+ };
+ const phi_node = self.builder.buildPhi(llvm_type, "");
+ phi_node.addIncoming(&incoming_values, &incoming_blocks, 2);
+ return phi_node;
+ }
+
fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
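
`airErrorSetHasValue` above builds an LLVM `switch` over the error set's member values and phis a `bool` out of it. Roughly the same check expressed as ordinary Zig, purely to illustrate the semantics; the error names here are made up:

```zig
const std = @import("std");

// Equivalent of the switch the lowering builds: is this runtime error value
// one of the set's members?
fn isInputError(err: anyerror) bool {
    return switch (err) {
        error.FileNotFound, error.AccessDenied => true,
        else => false,
    };
}

test "membership check" {
    try std.testing.expect(isInputError(error.FileNotFound));
    try std.testing.expect(!isInputError(error.OutOfMemory));
}
```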
@@ -8243,8 +8386,8 @@ pub const FuncGen = struct {
.Struct => {
if (result_ty.containerLayout() == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data;
- const big_bits = struct_obj.packedIntegerBits(target);
- const int_llvm_ty = self.dg.context.intType(big_bits);
+ const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *const llvm.Value = int_llvm_ty.constNull();
@@ -9359,16 +9502,20 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
llvm_types_index += 1;
},
.sse => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
+ llvm_types_index += 1;
},
.sseup => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
+ llvm_types_index += 1;
},
.x87 => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
+ llvm_types_index += 1;
},
.x87up => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
+ llvm_types_index += 1;
},
.complex_x87 => {
@panic("TODO");
@@ -9414,6 +9561,7 @@ const ParamTypeIterator = struct {
target: std.Target,
llvm_types_len: u32,
llvm_types_buffer: [8]u16,
+ byval_attr: bool,
const Lowering = enum {
no_bits,
@@ -9421,6 +9569,7 @@ const ParamTypeIterator = struct {
byref,
abi_sized_int,
multiple_llvm_ints,
+ multiple_llvm_float,
slice,
as_u16,
};
@@ -9428,6 +9577,7 @@ const ParamTypeIterator = struct {
pub fn next(it: *ParamTypeIterator) ?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const ty = it.fn_info.param_types[it.zig_index];
+ it.byval_attr = false;
return nextInner(it, ty);
}
@@ -9513,6 +9663,7 @@ const ParamTypeIterator = struct {
.memory => {
it.zig_index += 1;
it.llvm_index += 1;
+ it.byval_attr = true;
return .byref;
},
.sse => {
@@ -9532,6 +9683,7 @@ const ParamTypeIterator = struct {
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
+ it.byval_attr = true;
return .byref;
}
var llvm_types_buffer: [8]u16 = undefined;
@@ -9543,16 +9695,20 @@ const ParamTypeIterator = struct {
llvm_types_index += 1;
},
.sse => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 64;
+ llvm_types_index += 1;
},
.sseup => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 64;
+ llvm_types_index += 1;
},
.x87 => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 80;
+ llvm_types_index += 1;
},
.x87up => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 80;
+ llvm_types_index += 1;
},
.complex_x87 => {
@panic("TODO");
@@ -9566,11 +9722,16 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .abi_sized_int;
}
+ if (classes[0] == .sse and classes[1] == .none) {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ return .byval;
+ }
it.llvm_types_buffer = llvm_types_buffer;
it.llvm_types_len = llvm_types_index;
it.llvm_index += llvm_types_index;
it.zig_index += 1;
- return .multiple_llvm_ints;
+ return if (classes[0] == .integer) .multiple_llvm_ints else .multiple_llvm_float;
},
},
.wasm32 => {
@@ -9611,6 +9772,7 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
.target = dg.module.getTarget(),
.llvm_types_buffer = undefined,
.llvm_types_len = 0,
+ .byval_attr = false,
};
}
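
The `setSection` calls added above thread decl and export `linksection` information through to LLVM functions and globals. A minimal sketch of the user-facing syntax this serves, with hypothetical section names:

```zig
// Hypothetical section names; the attribute is what the new setSection calls
// forward to LLVM for functions, globals, and exported symbols.
export var boot_flags: u32 linksection(".data.boot") = 0;

export fn irqHandler() linksection(".text.vectors") void {}
```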
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index e4357b8060..9daa96eb8f 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -126,6 +126,9 @@ pub const Value = opaque {
pub const setThreadLocalMode = LLVMSetThreadLocalMode;
extern fn LLVMSetThreadLocalMode(Global: *const Value, Mode: ThreadLocalMode) void;
+ pub const setSection = LLVMSetSection;
+ extern fn LLVMSetSection(Global: *const Value, Section: [*:0]const u8) void;
+
pub const deleteGlobal = LLVMDeleteGlobal;
extern fn LLVMDeleteGlobal(GlobalVar: *const Value) void;
@@ -245,6 +248,9 @@ pub const Value = opaque {
pub const addFunctionAttr = ZigLLVMAddFunctionAttr;
extern fn ZigLLVMAddFunctionAttr(Fn: *const Value, attr_name: [*:0]const u8, attr_value: [*:0]const u8) void;
+
+ pub const addByValAttr = ZigLLVMAddByValAttr;
+ extern fn ZigLLVMAddByValAttr(Fn: *const Value, ArgNo: c_uint, type: *const Type) void;
};
pub const Type = opaque {
diff --git a/src/link.zig b/src/link.zig
index a69dcc4c6e..14ae142a3f 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -435,6 +435,7 @@ pub const File = struct {
EmitFail,
NameTooLong,
CurrentWorkingDirectoryUnlinked,
+ LockViolation,
};
/// Called from within the CodeGen to lower a local variable instantiation as an unnamed
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 2cd00f5a87..3ae151491f 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -102,7 +102,7 @@ pub const DeclState = struct {
}
pub fn addExprlocReloc(self: *DeclState, target: u32, offset: u32, is_ptr: bool) !void {
- log.debug("{x}: target sym @{d}, via GOT {}", .{ offset, target, is_ptr });
+ log.debug("{x}: target sym %{d}, via GOT {}", .{ offset, target, is_ptr });
try self.exprloc_relocs.append(self.gpa, .{
.@"type" = if (is_ptr) .got_load else .direct_load,
.target = target,
@@ -135,7 +135,7 @@ pub const DeclState = struct {
.@"type" = ty,
.offset = undefined,
});
- log.debug("@{d}: {}", .{ sym_index, ty.fmtDebug() });
+ log.debug("%{d}: {}", .{ sym_index, ty.fmtDebug() });
try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
.mod = self.mod,
});
@@ -143,7 +143,7 @@ pub const DeclState = struct {
.mod = self.mod,
}).?;
};
- log.debug("{x}: @{d} + 0", .{ offset, resolv });
+ log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
.target = resolv,
.atom = atom,
@@ -1056,6 +1056,7 @@ pub fn commitDeclState(
break :blk false;
};
if (deferred) {
+ log.debug("resolving %{d} deferred until flush", .{target});
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
@@ -1063,10 +1064,12 @@ pub fn commitDeclState(
.addend = reloc.addend,
});
} else {
+ const value = symbol.atom.off + symbol.offset + reloc.addend;
+ log.debug("{x}: [() => {x}] (%{d}, '{}')", .{ reloc.offset, value, target, ty.fmtDebug() });
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
- symbol.atom.off + symbol.offset + reloc.addend,
+ value,
target_endian,
);
}
@@ -1259,7 +1262,7 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
debug_info_sect.addr = dwarf_segment.vmaddr + new_offset - dwarf_segment.fileoff;
}
debug_info_sect.size = needed_size;
- d_sym.debug_line_header_dirty = true;
+ d_sym.debug_info_header_dirty = true;
}
const file_pos = debug_info_sect.offset + atom.off;
try pwriteDbgInfoNops(
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index db207af5f5..3da086a382 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -5315,10 +5315,10 @@ fn writeFunctionStarts(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
}
fn filterDataInCode(
- dices: []const macho.data_in_code_entry,
+ dices: []align(1) const macho.data_in_code_entry,
start_addr: u64,
end_addr: u64,
-) []const macho.data_in_code_entry {
+) []align(1) const macho.data_in_code_entry {
const Predicate = struct {
addr: u64,
@@ -5825,7 +5825,7 @@ pub fn getEntryPoint(self: MachO) error{MissingMainEntrypoint}!SymbolWithLoc {
return global;
}
-pub fn findFirst(comptime T: type, haystack: []const T, start: usize, predicate: anytype) usize {
+pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, predicate: anytype) usize {
if (!@hasDecl(@TypeOf(predicate), "predicate"))
@compileError("Predicate is required to define fn predicate(@This(), T) bool");
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 4871276f3c..dd818ea936 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -218,7 +218,7 @@ const RelocContext = struct {
base_offset: i32 = 0,
};
-pub fn parseRelocs(self: *Atom, relocs: []const macho.relocation_info, context: RelocContext) !void {
+pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info, context: RelocContext) !void {
const tracy = trace(@src());
defer tracy.end();
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 3bfe334302..c2aa562db5 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -63,17 +63,16 @@ pub const Reloc = struct {
pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void {
if (self.linkedit_segment_cmd_index == null) {
self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
- log.debug("found __LINKEDIT segment free space 0x{x} to 0x{x}", .{
- self.base.page_size,
- self.base.page_size * 2,
- });
+ const fileoff = @intCast(u64, self.base.page_size);
+ const needed_size = @intCast(u64, self.base.page_size) * 2;
+ log.debug("found __LINKEDIT segment free space 0x{x} to 0x{x}", .{ fileoff, needed_size });
// TODO this needs reworking
try self.segments.append(allocator, .{
.segname = makeStaticString("__LINKEDIT"),
- .vmaddr = self.base.page_size,
- .vmsize = self.base.page_size,
- .fileoff = self.base.page_size,
- .filesize = self.base.page_size,
+ .vmaddr = fileoff,
+ .vmsize = needed_size,
+ .fileoff = fileoff,
+ .filesize = needed_size,
.maxprot = macho.PROT.READ,
.initprot = macho.PROT.READ,
.cmdsize = @sizeOf(macho.segment_command_64),
@@ -284,6 +283,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Opti
const lc_writer = lc_buffer.writer();
var ncmds: u32 = 0;
+ self.updateDwarfSegment();
try self.writeLinkeditSegmentData(&ncmds, lc_writer);
self.updateDwarfSegment();
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 996a85ed4b..935183bbc6 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -24,7 +24,7 @@ mtime: u64,
contents: []align(@alignOf(u64)) const u8,
header: macho.mach_header_64 = undefined,
-in_symtab: []const macho.nlist_64 = undefined,
+in_symtab: []align(1) const macho.nlist_64 = undefined,
in_strtab: []const u8 = undefined,
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
@@ -99,12 +99,13 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
},
.SYMTAB => {
const symtab = cmd.cast(macho.symtab_command).?;
+ // Sadly, SYMTAB may be at an unaligned offset within the object file.
self.in_symtab = @ptrCast(
- [*]const macho.nlist_64,
- @alignCast(@alignOf(macho.nlist_64), &self.contents[symtab.symoff]),
+ [*]align(1) const macho.nlist_64,
+ self.contents.ptr + symtab.symoff,
)[0..symtab.nsyms];
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
- try self.symtab.appendSlice(allocator, self.in_symtab);
+ try self.symtab.appendUnalignedSlice(allocator, self.in_symtab);
},
else => {},
}
@@ -196,10 +197,10 @@ fn filterSymbolsByAddress(
}
fn filterRelocs(
- relocs: []const macho.relocation_info,
+ relocs: []align(1) const macho.relocation_info,
start_addr: u64,
end_addr: u64,
-) []const macho.relocation_info {
+) []align(1) const macho.relocation_info {
const Predicate = struct {
addr: u64,
@@ -303,8 +304,8 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
// Read section's list of relocations
const relocs = @ptrCast(
- [*]const macho.relocation_info,
- @alignCast(@alignOf(macho.relocation_info), &self.contents[sect.reloff]),
+ [*]align(1) const macho.relocation_info,
+ self.contents.ptr + sect.reloff,
)[0..sect.nreloc];
// Symbols within this section only.
@@ -472,7 +473,7 @@ fn createAtomFromSubsection(
size: u64,
alignment: u32,
code: ?[]const u8,
- relocs: []const macho.relocation_info,
+ relocs: []align(1) const macho.relocation_info,
indexes: []const SymbolAtIndex,
match: u8,
sect: macho.section_64,
@@ -538,7 +539,7 @@ pub fn getSourceSection(self: Object, index: u16) macho.section_64 {
return self.sections.items[index];
}
-pub fn parseDataInCode(self: Object) ?[]const macho.data_in_code_entry {
+pub fn parseDataInCode(self: Object) ?[]align(1) const macho.data_in_code_entry {
var it = LoadCommandIterator{
.ncmds = self.header.ncmds,
.buffer = self.contents[@sizeOf(macho.mach_header_64)..][0..self.header.sizeofcmds],
@@ -549,8 +550,8 @@ pub fn parseDataInCode(self: Object) ?[]const macho.data_in_code_entry {
const dice = cmd.cast(macho.linkedit_data_command).?;
const ndice = @divExact(dice.datasize, @sizeOf(macho.data_in_code_entry));
return @ptrCast(
- [*]const macho.data_in_code_entry,
- @alignCast(@alignOf(macho.data_in_code_entry), &self.contents[dice.dataoff]),
+ [*]align(1) const macho.data_in_code_entry,
+ self.contents.ptr + dice.dataoff,
)[0..ndice];
},
else => {},
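
The Mach-O changes above move to `[]align(1)` slices because SYMTAB, relocation, and data-in-code offsets within an object file need not be aligned to the record type. A small sketch of the same reinterpretation pattern with a made-up record type, using the two-argument `@ptrCast` form current in this codebase:

```zig
const std = @import("std");

// Made-up record type; the point is the []align(1) view over bytes at an
// arbitrary file offset.
const Record = extern struct { a: u32, b: u32 };

fn viewRecords(bytes: []const u8, off: usize, n: usize) []align(1) const Record {
    return @ptrCast([*]align(1) const Record, bytes.ptr + off)[0..n];
}

test "unaligned view" {
    const raw = [_]u8{0} ** 17;
    const recs = viewRecords(&raw, 1, 2);
    try std.testing.expectEqual(@as(usize, 2), recs.len);
}
```

Note that `ArrayList.appendSlice` requires a normally aligned slice, which is why the symtab copy above switches to `appendUnalignedSlice`.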
diff --git a/src/main.zig b/src/main.zig
index f192137b3c..971fe19e36 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -718,7 +718,7 @@ fn buildOutputType(
var test_filter: ?[]const u8 = null;
var test_name_prefix: ?[]const u8 = null;
var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR");
- var override_global_cache_dir: ?[]const u8 = null;
+ var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR");
var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR");
var main_pkg_path: ?[]const u8 = null;
var clang_preprocessor_mode: Compilation.ClangPreprocessorMode = .no;
@@ -1657,7 +1657,8 @@ fn buildOutputType(
disable_c_depfile = true;
try clang_argv.appendSlice(it.other_args);
},
- .dep_file_mm => { // -MM
+ .dep_file_to_stdout => { // -M, -MM
+ // "Like -MD, but also implies -E and writes to stdout by default"
// "Like -MMD, but also implies -E and writes to stdout by default"
c_out_mode = .preprocessor;
disable_c_depfile = true;
@@ -4226,6 +4227,7 @@ const FmtError = error{
NotOpenForWriting,
UnsupportedEncoding,
ConnectionResetByPeer,
+ LockViolation,
} || fs.File.OpenError;
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
@@ -4652,7 +4654,7 @@ pub const ClangArgIterator = struct {
lib_dir,
mcpu,
dep_file,
- dep_file_mm,
+ dep_file_to_stdout,
framework_dir,
framework,
nostdlibinc,
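
The main.zig change above seeds the global cache directory from `ZIG_GLOBAL_CACHE_DIR`, matching the existing `ZIG_LOCAL_CACHE_DIR` and `ZIG_LIB_DIR` handling. A sketch of that precedence with a hypothetical helper name; an explicit command-line value still wins over the environment:

```zig
const std = @import("std");

// Hypothetical helper: a --global-cache-dir flag takes priority, otherwise
// ZIG_GLOBAL_CACHE_DIR provides the default, otherwise null.
fn resolveGlobalCacheDir(arena: std.mem.Allocator, flag_value: ?[]const u8) !?[]const u8 {
    if (flag_value) |v| return v;
    const from_env = std.process.getEnvVarOwned(arena, "ZIG_GLOBAL_CACHE_DIR") catch |err| switch (err) {
        error.EnvironmentVariableNotFound => return null,
        else => |e| return e,
    };
    return from_env;
}
```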
diff --git a/src/print_air.zig b/src/print_air.zig
index 23107946f6..04dec25f5f 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -243,6 +243,7 @@ const Writer = struct {
.popcount,
.byte_swap,
.bit_reverse,
+ .error_set_has_value,
=> try w.writeTyOp(s, inst),
.block,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6e33154bbd..4bc96c4259 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -214,7 +214,6 @@ const Writer = struct {
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -247,7 +246,6 @@ const Writer = struct {
.validate_array_init_ty => try self.writeValidateArrayInitTy(stream, inst),
.array_type_sentinel => try self.writeArrayTypeSentinel(stream, inst),
- .param_type => try self.writeParamType(stream, inst),
.ptr_type => try self.writePtrType(stream, inst),
.int => try self.writeInt(stream, inst),
.int_big => try self.writeIntBig(stream, inst),
@@ -500,6 +498,7 @@ const Writer = struct {
.wasm_memory_size,
.error_to_int,
.int_to_error,
+ .reify,
=> {
const inst_data = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(inst_data.node);
@@ -605,16 +604,6 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
- fn writeParamType(
- self: *Writer,
- stream: anytype,
- inst: Zir.Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].param_type;
- try self.writeInstRef(stream, inst_data.callee);
- try stream.print(", {d})", .{inst_data.param_index});
- }
-
fn writePtrType(
self: *Writer,
stream: anytype,
@@ -1158,7 +1147,8 @@ const Writer = struct {
fn writeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.Call, inst_data.payload_index);
- const args = self.code.refSlice(extra.end, extra.data.flags.args_len);
+ const args_len = extra.data.flags.args_len;
+ const body = self.code.extra[extra.end..];
if (extra.data.flags.ensure_result_used) {
try stream.writeAll("nodiscard ");
@@ -1166,10 +1156,27 @@ const Writer = struct {
try stream.print(".{s}, ", .{@tagName(@intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier))});
try self.writeInstRef(stream, extra.data.callee);
try stream.writeAll(", [");
- for (args) |arg, i| {
- if (i != 0) try stream.writeAll(", ");
- try self.writeInstRef(stream, arg);
+
+ self.indent += 2;
+ if (args_len != 0) {
+ try stream.writeAll("\n");
+ }
+ var i: usize = 0;
+ var arg_start: u32 = args_len;
+ while (i < args_len) : (i += 1) {
+ try stream.writeByteNTimes(' ', self.indent);
+ const arg_end = self.code.extra[extra.end + i];
+ defer arg_start = arg_end;
+ const arg_body = body[arg_start..arg_end];
+ try self.writeBracedBody(stream, arg_body);
+
+ try stream.writeAll(",\n");
}
+ self.indent -= 2;
+ if (args_len != 0) {
+ try stream.writeByteNTimes(' ', self.indent);
+ }
+
try stream.writeAll("]) ");
try self.writeSrc(stream, inst_data.src());
}
@@ -1238,13 +1245,36 @@ const Writer = struct {
try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
- try stream.print("{s}, {s}, ", .{
- @tagName(small.name_strategy), @tagName(small.layout),
- });
+
+ try stream.print("{s}, ", .{@tagName(small.name_strategy)});
+
+ if (small.layout == .Packed and small.has_backing_int) {
+ const backing_int_body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ try stream.writeAll("Packed(");
+ if (backing_int_body_len == 0) {
+ const backing_int_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ try self.writeInstRef(stream, backing_int_ref);
+ } else {
+ const body = self.code.extra[extra_index..][0..backing_int_body_len];
+ extra_index += backing_int_body_len;
+ self.indent += 2;
+ try self.writeBracedDecl(stream, body);
+ self.indent -= 2;
+ }
+ try stream.writeAll("), ");
+ } else {
+ try stream.print("{s}, ", .{@tagName(small.layout)});
+ }
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
extra_index = try self.writeDecls(stream, decls_len, extra_index);
@@ -1415,6 +1445,10 @@ const Writer = struct {
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
extra_index = try self.writeDecls(stream, decls_len, extra_index);
@@ -1662,6 +1696,10 @@ const Writer = struct {
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
extra_index = try self.writeDecls(stream, decls_len, extra_index);
@@ -1755,6 +1793,10 @@ const Writer = struct {
if (decls_len == 0) {
try stream.writeAll("{})");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
_ = try self.writeDecls(stream, decls_len, extra_index);
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index 4028c3872d..9f9a6151b8 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -1116,6 +1116,7 @@ struct AstNodeContainerDecl {
ContainerLayout layout;
bool auto_enum, is_root; // union(enum)
+ bool unsupported_explicit_backing_int;
};
struct AstNodeErrorSetField {
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index 08aa8bbf06..90173f384e 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -3034,6 +3034,12 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
AstNode *decl_node = struct_type->data.structure.decl_node;
+ if (decl_node->data.container_decl.unsupported_explicit_backing_int) {
+ add_node_error(g, decl_node, buf_create_from_str(
+ "the stage1 compiler does not support explicit backing integer types on packed structs"));
+ return ErrorSemanticAnalyzeFail;
+ }
+
if (struct_type->data.structure.resolve_loop_flag_zero_bits) {
if (struct_type->data.structure.resolve_status != ResolveStatusInvalid) {
struct_type->data.structure.resolve_status = ResolveStatusInvalid;
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index e31715030c..a5428945a9 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -18640,7 +18640,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Struct", nullptr);
- ZigValue **fields = alloc_const_vals_ptrs(g, 4);
+ ZigValue **fields = alloc_const_vals_ptrs(g, 5);
result->data.x_struct.fields = fields;
// layout: ContainerLayout
@@ -18648,8 +18648,17 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
fields[0]->special = ConstValSpecialStatic;
fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.structure.layout);
+
+ // backing_integer: ?type
+ ensure_field_index(result->type, "backing_integer", 1);
+ fields[1]->special = ConstValSpecialStatic;
+ fields[1]->type = get_optional_type(g, g->builtin_types.entry_type);
+ // This is always null in stage1, as stage1 does not support explicit backing integers
+ // for packed structs.
+ fields[1]->data.x_optional = nullptr;
+
// fields: []Type.StructField
- ensure_field_index(result->type, "fields", 1);
+ ensure_field_index(result->type, "fields", 2);
ZigType *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField", nullptr);
if ((err = type_resolve(g, type_info_struct_field_type, ResolveStatusSizeKnown))) {
@@ -18663,7 +18672,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
struct_field_array->data.x_array.special = ConstArraySpecialNone;
struct_field_array->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(struct_field_count);
- init_const_slice(g, fields[1], struct_field_array, 0, struct_field_count, false, nullptr);
+ init_const_slice(g, fields[2], struct_field_array, 0, struct_field_count, false, nullptr);
for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
TypeStructField *struct_field = type_entry->data.structure.fields[struct_field_index];
@@ -18710,18 +18719,18 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
struct_field_val->parent.data.p_array.elem_index = struct_field_index;
}
// decls: []Type.Declaration
- ensure_field_index(result->type, "decls", 2);
- if ((err = ir_make_type_info_decls(ira, source_node, fields[2],
+ ensure_field_index(result->type, "decls", 3);
+ if ((err = ir_make_type_info_decls(ira, source_node, fields[3],
type_entry->data.structure.decls_scope, false)))
{
return err;
}
// is_tuple: bool
- ensure_field_index(result->type, "is_tuple", 3);
- fields[3]->special = ConstValSpecialStatic;
- fields[3]->type = g->builtin_types.entry_bool;
- fields[3]->data.x_bool = is_tuple(type_entry);
+ ensure_field_index(result->type, "is_tuple", 4);
+ fields[4]->special = ConstValSpecialStatic;
+ fields[4]->type = g->builtin_types.entry_bool;
+ fields[4]->data.x_bool = is_tuple(type_entry);
break;
}
@@ -19313,7 +19322,14 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);
- ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 1);
+ ZigType *tag_type = get_const_field_meta_type_optional(ira, source_node, payload, "backing_integer", 1);
+ if (tag_type != nullptr) {
+ ir_add_error_node(ira, source_node, buf_create_from_str(
+ "the stage1 compiler does not support explicit backing integer types on packed structs"));
+ return ira->codegen->invalid_inst_gen->value->type;
+ }
+
+ ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 2);
if (fields_value == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
assert(fields_value->special == ConstValSpecialStatic);
@@ -19322,7 +19338,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);
- ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 2);
+ ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 3);
if (decls_value == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
assert(decls_value->special == ConstValSpecialStatic);
@@ -19335,7 +19351,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
}
bool is_tuple;
- if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 3, &is_tuple)))
+ if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 4, &is_tuple)))
return ira->codegen->invalid_inst_gen->value->type;
ZigType *entry = new_type_table_entry(ZigTypeIdStruct);
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index fdc0777aff..bd778484cb 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -2902,16 +2902,25 @@ static AstNode *ast_parse_container_decl_auto(ParseContext *pc) {
}
// ContainerDeclType
-// <- KEYWORD_struct
+// <- KEYWORD_struct (LPAREN Expr RPAREN)?
// / KEYWORD_enum (LPAREN Expr RPAREN)?
// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
// / KEYWORD_opaque
static AstNode *ast_parse_container_decl_type(ParseContext *pc) {
TokenIndex first = eat_token_if(pc, TokenIdKeywordStruct);
if (first != 0) {
+ bool explicit_backing_int = false;
+ if (eat_token_if(pc, TokenIdLParen) != 0) {
+ explicit_backing_int = true;
+ ast_expect(pc, ast_parse_expr);
+ expect_token(pc, TokenIdRParen);
+ }
AstNode *res = ast_create_node(pc, NodeTypeContainerDecl, first);
res->data.container_decl.init_arg_expr = nullptr;
res->data.container_decl.kind = ContainerKindStruct;
+ // We want this to be an error in semantic analysis, not parsing, to make
+ // sharing the test suite between stage1 and self-hosted easier.
+ res->data.container_decl.unsupported_explicit_backing_int = explicit_backing_int;
return res;
}
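
The stage1 changes above parse, but deliberately reject, explicit backing integers on packed structs, while `@typeInfo` gains a `backing_integer` field. A minimal sketch of the feature being guarded against, assuming the new `packed struct(T)` syntax supported only by the self-hosted compiler:

```zig
const std = @import("std");

// Self-hosted only: stage1 now reports an error for this. The backing integer
// fixes the struct's bit width and shows up in @typeInfo as backing_integer.
const Flags = packed struct(u16) {
    carry: bool,
    zero: bool,
    _reserved: u14,
};

comptime {
    std.debug.assert(@typeInfo(Flags).Struct.backing_integer.? == u16);
    std.debug.assert(@bitSizeOf(Flags) == 16);
}
```

Because stage1 cannot represent the field, its `@typeInfo` always reports `backing_integer` as null, as the comment in ir.cpp above states.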
diff --git a/src/test.zig b/src/test.zig
index 5f4107a402..266b4181e0 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -1224,10 +1224,6 @@ pub const TestContext = struct {
try aux_thread_pool.init(self.gpa);
defer aux_thread_pool.deinit();
- var case_thread_pool: ThreadPool = undefined;
- try case_thread_pool.init(self.gpa);
- defer case_thread_pool.deinit();
-
// Use the same global cache dir for all the tests, such that we for example don't have to
// rebuild musl libc for every case (when LLVM backend is enabled).
var global_tmp = std.testing.tmpDir(.{});
@@ -1245,9 +1241,6 @@ pub const TestContext = struct {
defer self.gpa.free(global_cache_directory.path.?);
{
- var wait_group: WaitGroup = .{};
- defer wait_group.wait();
-
for (self.cases.items) |*case| {
if (build_options.skip_non_native) {
if (case.target.getCpuArch() != builtin.cpu.arch)
@@ -1267,17 +1260,19 @@ pub const TestContext = struct {
if (std.mem.indexOf(u8, case.name, test_filter) == null) continue;
}
- wait_group.start();
- try case_thread_pool.spawn(workerRunOneCase, .{
+ var prg_node = root_node.start(case.name, case.updates.items.len);
+ prg_node.activate();
+ defer prg_node.end();
+
+ case.result = runOneCase(
self.gpa,
- root_node,
- case,
+ &prg_node,
+ case.*,
zig_lib_directory,
&aux_thread_pool,
global_cache_directory,
host,
- &wait_group,
- });
+ );
}
}
@@ -1295,33 +1290,6 @@ pub const TestContext = struct {
}
}
- fn workerRunOneCase(
- gpa: Allocator,
- root_node: *std.Progress.Node,
- case: *Case,
- zig_lib_directory: Compilation.Directory,
- thread_pool: *ThreadPool,
- global_cache_directory: Compilation.Directory,
- host: std.zig.system.NativeTargetInfo,
- wait_group: *WaitGroup,
- ) void {
- defer wait_group.finish();
-
- var prg_node = root_node.start(case.name, case.updates.items.len);
- prg_node.activate();
- defer prg_node.end();
-
- case.result = runOneCase(
- gpa,
- &prg_node,
- case.*,
- zig_lib_directory,
- thread_pool,
- global_cache_directory,
- host,
- );
- }
-
fn runOneCase(
allocator: Allocator,
root_node: *std.Progress.Node,
diff --git a/src/type.zig b/src/type.zig
index d53158735a..582ea230ef 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -3000,9 +3000,17 @@ pub const Type = extern union {
.lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
};
if (struct_obj.layout == .Packed) {
- var buf: Type.Payload.Bits = undefined;
- const int_ty = struct_obj.packedIntegerType(target, &buf);
- return AbiAlignmentAdvanced{ .scalar = int_ty.abiAlignment(target) };
+ switch (strat) {
+ .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
+ .lazy => |arena| {
+ if (!struct_obj.haveLayout()) {
+ return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
+ }
+ },
+ .eager => {},
+ }
+ assert(struct_obj.haveLayout());
+ return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) };
}
const fields = ty.structFields();
@@ -3192,17 +3200,16 @@ pub const Type = extern union {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (strat) {
- .sema_kit => |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty),
+ .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
.lazy => |arena| {
- if (!struct_obj.haveFieldTypes()) {
+ if (!struct_obj.haveLayout()) {
return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
}
},
.eager => {},
}
- var buf: Type.Payload.Bits = undefined;
- const int_ty = struct_obj.packedIntegerType(target, &buf);
- return AbiSizeAdvanced{ .scalar = int_ty.abiSize(target) };
+ assert(struct_obj.haveLayout());
+ return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) };
},
else => {
switch (strat) {
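
A minimal sketch of the layout rule the two hunks above implement: a packed struct's ABI alignment and size come from its backing integer type rather than from an integer type computed on the fly. The concrete values assume a typical target where `u32` has 4-byte size and alignment:

    const std = @import("std");

    const Packed = packed struct(u32) {
        a: u8,
        b: u24,
    };

    comptime {
        // ABI size and alignment follow the u32 backing integer.
        std.debug.assert(@sizeOf(Packed) == @sizeOf(u32));
        std.debug.assert(@alignOf(Packed) == @alignOf(u32));
    }
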
@@ -5771,50 +5778,6 @@ pub const Type = extern union {
}
}
- pub fn getNodeOffset(ty: Type) i32 {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.node_offset;
- },
- .enum_numbered => return ty.castTag(.enum_numbered).?.data.node_offset,
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.node_offset;
- },
- .@"struct" => {
- const struct_obj = ty.castTag(.@"struct").?.data;
- return struct_obj.node_offset;
- },
- .error_set => {
- const error_set = ty.castTag(.error_set).?.data;
- return error_set.node_offset;
- },
- .@"union", .union_safety_tagged, .union_tagged => {
- const union_obj = ty.cast(Payload.Union).?.data;
- return union_obj.node_offset;
- },
- .@"opaque" => {
- const opaque_obj = ty.cast(Payload.Opaque).?.data;
- return opaque_obj.node_offset;
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .prefetch_options,
- .export_options,
- .extern_options,
- .type_info,
- => unreachable, // These need to be resolved earlier.
-
- else => unreachable,
- }
- }
-
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@@ -6340,6 +6303,8 @@ pub const Type = extern union {
pub const @"anyopaque" = initTag(.anyopaque);
pub const @"null" = initTag(.@"null");
+ pub const err_int = Type.u16;
+
pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
const target = mod.getTarget();
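
The `err_int` alias added above names the integer type backing error values. A small sketch of what that means in practice, assuming the default 16-bit error set size (the error name and test name are illustrative):

    const std = @import("std");

    test "error values fit in u16 by default" {
        const err: anyerror = error.OutOfMemory;
        const n: u16 = @errorToInt(err); // coerces because the error int type is u16
        try std.testing.expect(n != 0);  // 0 is reserved for "no error"
    }
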
diff --git a/src/value.zig b/src/value.zig
index 3994040ba6..677a459afe 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1194,6 +1194,16 @@ pub const Value = extern union {
return switch (self.tag()) {
.bool_true, .one => true,
.bool_false, .zero => false,
+ .int_u64 => switch (self.castTag(.int_u64).?.data) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
+ .int_i64 => switch (self.castTag(.int_i64).?.data) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
else => unreachable,
};
}
@@ -3472,44 +3482,6 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
- if (ty.zigTypeTag() == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
- }
- return Value.Tag.aggregate.create(allocator, result_data);
- }
- return intRemScalar(lhs, rhs, allocator, target);
- }
-
- pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
- const limbs_q = try allocator.alloc(
- std.math.big.Limb,
- lhs_bigint.limbs.len,
- );
- const limbs_r = try allocator.alloc(
- std.math.big.Limb,
- // TODO: consider reworking Sema to re-use Values rather than
- // always producing new Value objects.
- rhs_bigint.limbs.len,
- );
- const limbs_buffer = try allocator.alloc(
- std.math.big.Limb,
- std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
- );
- var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
- var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
- return fromBigInt(allocator, result_r.toConst());
- }
-
pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());