Diffstat (limited to 'src')
-rw-r--r--  src/AstGen.zig    | 324
-rw-r--r--  src/Module.zig    |   4
-rw-r--r--  src/Sema.zig      | 341
-rw-r--r--  src/Zir.zig       | 262
-rw-r--r--  src/print_zir.zig | 169
-rw-r--r--  src/type.zig      |   4
6 files changed, 852 insertions(+), 252 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 1a79e044fa..5c4fc88483 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -73,7 +73,7 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
- Zir.Inst.ExtendedFunc.Bits => @bitCast(u32, @field(extra, field.name)),
+ Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
};
i += 1;
@@ -1205,7 +1205,7 @@ fn fnProtoExpr(
break :is_var_args false;
};
- const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
+ const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
break :inst try expr(&block_scope, scope, align_rl, fn_proto.ast.align_expr);
};
@@ -1232,19 +1232,24 @@ fn fnProtoExpr(
if (is_inferred_error) {
return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
}
- var ret_gz = block_scope.makeSubBlock(scope);
- defer ret_gz.unstack();
- const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
- const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
+ const ret_ty = try expr(&block_scope, scope, coerced_type_rl, fn_proto.ast.return_type);
const result = try block_scope.addFunc(.{
.src_node = fn_proto.ast.proto_node,
+
+ .cc_ref = cc,
+ .cc_gz = null,
+ .align_ref = align_ref,
+ .align_gz = null,
+ .ret_ref = ret_ty,
+ .ret_gz = null,
+ .section_ref = .none,
+ .section_gz = null,
+ .addrspace_ref = .none,
+ .addrspace_gz = null,
+
.param_block = block_inst,
- .ret_gz = &ret_gz,
- .ret_br = ret_br,
.body_gz = null,
- .cc = cc,
- .align_inst = align_inst,
.lib_name = 0,
.is_var_args = is_var_args,
.is_inferred_error = false,
@@ -2262,7 +2267,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.field_val_named,
.func,
.func_inferred,
- .func_extended,
+ .func_fancy,
.int,
.int_big,
.float,
@@ -3373,9 +3378,8 @@ fn fnDecl(
const doc_comment_index = try astgen.docCommentAsString(fn_proto.firstToken());
- const has_section_or_addrspace = fn_proto.ast.section_expr != 0 or fn_proto.ast.addrspace_expr != 0;
- // Alignment is passed in the func instruction in this case.
- wip_members.nextDecl(is_pub, is_export, false, has_section_or_addrspace);
+ // align, linksection, and addrspace are passed in the func instruction in this case.
+ wip_members.nextDecl(is_pub, is_export, false, false);
var params_scope = &fn_gz.base;
const is_var_args = is_var_args: {
@@ -3461,17 +3465,49 @@ fn fnDecl(
const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
const is_inferred_error = token_tags[maybe_bang] == .bang;
- const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
- break :inst try expr(&decl_gz, params_scope, align_rl, fn_proto.ast.align_expr);
+ // After the function ZIR instruction is created, the break instructions inside
+ // the expression blocks for align, addrspace, cc, and ret_ty are updated to use
+ // the function instruction as the "block" to break from.
+
+ var align_gz = decl_gz.makeSubBlock(params_scope);
+ defer align_gz.unstack();
+ const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
+ const inst = try expr(&decl_gz, params_scope, coerced_align_rl, fn_proto.ast.align_expr);
+ if (align_gz.instructionsSlice().len == 0) {
+ // In this case we will send a len=0 body which can be encoded more efficiently.
+ break :inst inst;
+ }
+ _ = try align_gz.addBreak(.break_inline, 0, inst);
+ break :inst inst;
};
- const addrspace_inst: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: {
- break :inst try expr(&decl_gz, params_scope, .{ .ty = .address_space_type }, fn_proto.ast.addrspace_expr);
+
+ var addrspace_gz = decl_gz.makeSubBlock(params_scope);
+ defer addrspace_gz.unstack();
+ const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: {
+ const inst = try expr(&decl_gz, params_scope, .{ .coerced_ty = .address_space_type }, fn_proto.ast.addrspace_expr);
+ if (addrspace_gz.instructionsSlice().len == 0) {
+ // In this case we will send a len=0 body which can be encoded more efficiently.
+ break :inst inst;
+ }
+ _ = try addrspace_gz.addBreak(.break_inline, 0, inst);
+ break :inst inst;
};
- const section_inst: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
- break :inst try comptimeExpr(&decl_gz, params_scope, .{ .ty = .const_slice_u8_type }, fn_proto.ast.section_expr);
+
+ var section_gz = decl_gz.makeSubBlock(params_scope);
+ defer section_gz.unstack();
+ const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
+ const inst = try expr(&decl_gz, params_scope, .{ .coerced_ty = .const_slice_u8_type }, fn_proto.ast.section_expr);
+ if (section_gz.instructionsSlice().len == 0) {
+ // In this case we will send a len=0 body which can be encoded more efficiently.
+ break :inst inst;
+ }
+ _ = try section_gz.addBreak(.break_inline, 0, inst);
+ break :inst inst;
};
- const cc: Zir.Inst.Ref = blk: {
+ var cc_gz = decl_gz.makeSubBlock(params_scope);
+ defer cc_gz.unstack();
+ const cc_ref: Zir.Inst.Ref = blk: {
if (fn_proto.ast.callconv_expr != 0) {
if (has_inline_keyword) {
return astgen.failNode(
@@ -3480,12 +3516,18 @@ fn fnDecl(
.{},
);
}
- break :blk try expr(
+ const inst = try expr(
&decl_gz,
params_scope,
- .{ .ty = .calling_convention_type },
+ .{ .coerced_ty = .calling_convention_type },
fn_proto.ast.callconv_expr,
);
+ if (cc_gz.instructionsSlice().len == 0) {
+ // In this case we will send a len=0 body which can be encoded more efficiently.
+ break :blk inst;
+ }
+ _ = try cc_gz.addBreak(.break_inline, 0, inst);
+ break :blk inst;
} else if (is_extern) {
// note: https://github.com/ziglang/zig/issues/5269
break :blk .calling_convention_c;
@@ -3498,8 +3540,15 @@ fn fnDecl(
var ret_gz = decl_gz.makeSubBlock(params_scope);
defer ret_gz.unstack();
- const ret_ty = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
- const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
+ const ret_ref: Zir.Inst.Ref = inst: {
+ const inst = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
+ if (ret_gz.instructionsSlice().len == 0) {
+ // In this case we will send a len=0 body which can be encoded more efficiently.
+ break :inst inst;
+ }
+ _ = try ret_gz.addBreak(.break_inline, 0, inst);
+ break :inst inst;
+ };
const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
if (!is_extern) {
@@ -3510,12 +3559,18 @@ fn fnDecl(
}
break :func try decl_gz.addFunc(.{
.src_node = decl_node,
+ .cc_ref = cc_ref,
+ .cc_gz = &cc_gz,
+ .align_ref = align_ref,
+ .align_gz = &align_gz,
+ .ret_ref = ret_ref,
.ret_gz = &ret_gz,
- .ret_br = ret_br,
+ .section_ref = section_ref,
+ .section_gz = &section_gz,
+ .addrspace_ref = addrspace_ref,
+ .addrspace_gz = &addrspace_gz,
.param_block = block_inst,
.body_gz = null,
- .cc = cc,
- .align_inst = align_inst,
.lib_name = lib_name,
.is_var_args = is_var_args,
.is_inferred_error = false,
@@ -3549,14 +3604,20 @@ fn fnDecl(
break :func try decl_gz.addFunc(.{
.src_node = decl_node,
+ .cc_ref = cc_ref,
+ .cc_gz = &cc_gz,
+ .align_ref = align_ref,
+ .align_gz = &align_gz,
+ .ret_ref = ret_ref,
+ .ret_gz = &ret_gz,
+ .section_ref = section_ref,
+ .section_gz = &section_gz,
+ .addrspace_ref = addrspace_ref,
+ .addrspace_gz = &addrspace_gz,
.lbrace_line = lbrace_line,
.lbrace_column = lbrace_column,
.param_block = block_inst,
- .ret_gz = &ret_gz,
- .ret_br = ret_br,
.body_gz = &fn_gz,
- .cc = cc,
- .align_inst = align_inst,
.lib_name = lib_name,
.is_var_args = is_var_args,
.is_inferred_error = is_inferred_error,
@@ -3582,10 +3643,6 @@ fn fnDecl(
wip_members.appendToDecl(fn_name_str_index);
wip_members.appendToDecl(block_inst);
wip_members.appendToDecl(doc_comment_index);
- if (has_section_or_addrspace) {
- wip_members.appendToDecl(@enumToInt(section_inst));
- wip_members.appendToDecl(@enumToInt(addrspace_inst));
- }
}
fn globalVarDecl(
@@ -3979,14 +4036,22 @@ fn testDecl(
const func_inst = try decl_block.addFunc(.{
.src_node = node,
+
+ .cc_ref = .none,
+ .cc_gz = null,
+ .align_ref = .none,
+ .align_gz = null,
+ .ret_ref = .void_type,
+ .ret_gz = null,
+ .section_ref = .none,
+ .section_gz = null,
+ .addrspace_ref = .none,
+ .addrspace_gz = null,
+
.lbrace_line = lbrace_line,
.lbrace_column = lbrace_column,
.param_block = block_inst,
- .ret_gz = null,
- .ret_br = 0,
.body_gz = &fn_block,
- .cc = .none,
- .align_inst = .none,
.lib_name = 0,
.is_var_args = false,
.is_inferred_error = true,
@@ -9930,17 +9995,34 @@ const GenZir = struct {
gz.unstack();
}
- /// Supports `body_gz` stacked on `ret_gz` stacked on `gz`. Unstacks `body_gz` and `ret_gz`.
+ /// Must be called with the following stack set up:
+ /// * gz (bottom)
+ /// * align_gz
+ /// * addrspace_gz
+ /// * section_gz
+ /// * cc_gz
+ /// * ret_gz
+ /// * body_gz (top)
+ /// Unstacks all of those except for `gz`.
fn addFunc(gz: *GenZir, args: struct {
src_node: Ast.Node.Index,
lbrace_line: u32 = 0,
lbrace_column: u32 = 0,
- body_gz: ?*GenZir,
param_block: Zir.Inst.Index,
+
+ align_gz: ?*GenZir,
+ addrspace_gz: ?*GenZir,
+ section_gz: ?*GenZir,
+ cc_gz: ?*GenZir,
ret_gz: ?*GenZir,
- ret_br: Zir.Inst.Index,
- cc: Zir.Inst.Ref,
- align_inst: Zir.Inst.Ref,
+ body_gz: ?*GenZir,
+
+ align_ref: Zir.Inst.Ref,
+ addrspace_ref: Zir.Inst.Ref,
+ section_ref: Zir.Inst.Ref,
+ cc_ref: Zir.Inst.Ref,
+ ret_ref: Zir.Inst.Ref,
+
lib_name: u32,
is_var_args: bool,
is_inferred_error: bool,
@@ -9950,11 +10032,13 @@ const GenZir = struct {
assert(args.src_node != 0);
const astgen = gz.astgen;
const gpa = astgen.gpa;
+ const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref;
+ const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
try astgen.instructions.ensureUnusedCapacity(gpa, 1);
var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
- var ret_ty: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
+ var ret_body: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
var src_locs_buffer: [3]u32 = undefined;
var src_locs: []u32 = src_locs_buffer[0..0];
if (args.body_gz) |body_gz| {
@@ -9978,61 +10062,120 @@ const GenZir = struct {
body = body_gz.instructionsSlice();
if (args.ret_gz) |ret_gz|
- ret_ty = ret_gz.instructionsSliceUpto(body_gz);
+ ret_body = ret_gz.instructionsSliceUpto(body_gz);
} else {
if (args.ret_gz) |ret_gz|
- ret_ty = ret_gz.instructionsSlice();
+ ret_body = ret_gz.instructionsSlice();
}
- if (args.cc != .none or args.lib_name != 0 or
- args.is_var_args or args.is_test or args.align_inst != .none or
- args.is_extern)
+ if (args.cc_ref != .none or args.lib_name != 0 or
+ args.is_var_args or args.is_test or args.is_extern or
+ args.align_ref != .none or args.section_ref != .none or
+ args.addrspace_ref != .none)
{
+ var align_body: []Zir.Inst.Index = &.{};
+ var addrspace_body: []Zir.Inst.Index = &.{};
+ var section_body: []Zir.Inst.Index = &.{};
+ var cc_body: []Zir.Inst.Index = &.{};
+ if (args.ret_gz != null) {
+ align_body = args.align_gz.?.instructionsSliceUpto(args.addrspace_gz.?);
+ addrspace_body = args.addrspace_gz.?.instructionsSliceUpto(args.section_gz.?);
+ section_body = args.section_gz.?.instructionsSliceUpto(args.cc_gz.?);
+ cc_body = args.cc_gz.?.instructionsSliceUpto(args.ret_gz.?);
+ }
+
try astgen.extra.ensureUnusedCapacity(
gpa,
- @typeInfo(Zir.Inst.ExtendedFunc).Struct.fields.len +
- ret_ty.len + body.len + src_locs.len +
- @boolToInt(args.lib_name != 0) +
- @boolToInt(args.align_inst != .none) +
- @boolToInt(args.cc != .none),
+ @typeInfo(Zir.Inst.FuncFancy).Struct.fields.len +
+ fancyFnExprExtraLen(align_body, args.align_ref) +
+ fancyFnExprExtraLen(addrspace_body, args.addrspace_ref) +
+ fancyFnExprExtraLen(section_body, args.section_ref) +
+ fancyFnExprExtraLen(cc_body, args.cc_ref) +
+ fancyFnExprExtraLen(ret_body, ret_ref) +
+ body.len + src_locs.len +
+ @boolToInt(args.lib_name != 0),
);
- const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.ExtendedFunc{
+ const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.FuncFancy{
.param_block = args.param_block,
- .ret_body_len = @intCast(u32, ret_ty.len),
.body_len = @intCast(u32, body.len),
.bits = .{
.is_var_args = args.is_var_args,
.is_inferred_error = args.is_inferred_error,
- .has_lib_name = args.lib_name != 0,
- .has_cc = args.cc != .none,
- .has_align = args.align_inst != .none,
.is_test = args.is_test,
.is_extern = args.is_extern,
+ .has_lib_name = args.lib_name != 0,
+
+ .has_align_ref = args.align_ref != .none,
+ .has_addrspace_ref = args.addrspace_ref != .none,
+ .has_section_ref = args.section_ref != .none,
+ .has_cc_ref = args.cc_ref != .none,
+ .has_ret_ty_ref = ret_ref != .none,
+
+ .has_align_body = align_body.len != 0,
+ .has_addrspace_body = addrspace_body.len != 0,
+ .has_section_body = section_body.len != 0,
+ .has_cc_body = cc_body.len != 0,
+ .has_ret_ty_body = ret_body.len != 0,
},
});
if (args.lib_name != 0) {
astgen.extra.appendAssumeCapacity(args.lib_name);
}
- if (args.cc != .none) {
- astgen.extra.appendAssumeCapacity(@enumToInt(args.cc));
+
+ const zir_datas = astgen.instructions.items(.data);
+ if (align_body.len != 0) {
+ astgen.extra.appendAssumeCapacity(@intCast(u32, align_body.len));
+ astgen.extra.appendSliceAssumeCapacity(align_body);
+ zir_datas[align_body[align_body.len - 1]].@"break".block_inst = new_index;
+ } else if (args.align_ref != .none) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(args.align_ref));
+ }
+ if (addrspace_body.len != 0) {
+ astgen.extra.appendAssumeCapacity(@intCast(u32, addrspace_body.len));
+ astgen.extra.appendSliceAssumeCapacity(addrspace_body);
+ zir_datas[addrspace_body[addrspace_body.len - 1]].@"break".block_inst = new_index;
+ } else if (args.addrspace_ref != .none) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(args.addrspace_ref));
}
- if (args.align_inst != .none) {
- astgen.extra.appendAssumeCapacity(@enumToInt(args.align_inst));
+ if (section_body.len != 0) {
+ astgen.extra.appendAssumeCapacity(@intCast(u32, section_body.len));
+ astgen.extra.appendSliceAssumeCapacity(section_body);
+ zir_datas[section_body[section_body.len - 1]].@"break".block_inst = new_index;
+ } else if (args.section_ref != .none) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(args.section_ref));
}
- astgen.extra.appendSliceAssumeCapacity(ret_ty);
+ if (cc_body.len != 0) {
+ astgen.extra.appendAssumeCapacity(@intCast(u32, cc_body.len));
+ astgen.extra.appendSliceAssumeCapacity(cc_body);
+ zir_datas[cc_body[cc_body.len - 1]].@"break".block_inst = new_index;
+ } else if (args.cc_ref != .none) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(args.cc_ref));
+ }
+ if (ret_body.len != 0) {
+ astgen.extra.appendAssumeCapacity(@intCast(u32, ret_body.len));
+ astgen.extra.appendSliceAssumeCapacity(ret_body);
+ zir_datas[ret_body[ret_body.len - 1]].@"break".block_inst = new_index;
+ } else if (ret_ref != .none) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(ret_ref));
+ }
+
astgen.extra.appendSliceAssumeCapacity(body);
astgen.extra.appendSliceAssumeCapacity(src_locs);
- // order is important when unstacking
+
+ // Order is important when unstacking.
if (args.body_gz) |body_gz| body_gz.unstack();
- if (args.ret_gz) |ret_gz| ret_gz.unstack();
+ if (args.ret_gz != null) {
+ args.ret_gz.?.unstack();
+ args.cc_gz.?.unstack();
+ args.section_gz.?.unstack();
+ args.addrspace_gz.?.unstack();
+ args.align_gz.?.unstack();
+ }
+
try gz.instructions.ensureUnusedCapacity(gpa, 1);
- const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
- if (args.ret_br != 0) {
- astgen.instructions.items(.data)[args.ret_br].@"break".block_inst = new_index;
- }
astgen.instructions.appendAssumeCapacity(.{
- .tag = .func_extended,
+ .tag = .func_fancy,
.data = .{ .pl_node = .{
.src_node = gz.nodeIndexToRelative(args.src_node),
.payload_index = payload_index,
@@ -10044,27 +10187,40 @@ const GenZir = struct {
try astgen.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Zir.Inst.Func).Struct.fields.len +
- ret_ty.len + body.len + src_locs.len,
+ @maximum(ret_body.len, @boolToInt(ret_ref != .none)) +
+ body.len + src_locs.len,
);
+ const ret_body_len = if (ret_body.len != 0)
+ @intCast(u32, ret_body.len)
+ else
+ @boolToInt(ret_ref != .none);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.Func{
.param_block = args.param_block,
- .ret_body_len = @intCast(u32, ret_ty.len),
+ .ret_body_len = ret_body_len,
.body_len = @intCast(u32, body.len),
});
- astgen.extra.appendSliceAssumeCapacity(ret_ty);
+ const zir_datas = astgen.instructions.items(.data);
+ if (ret_body.len != 0) {
+ astgen.extra.appendSliceAssumeCapacity(ret_body);
+ zir_datas[ret_body[ret_body.len - 1]].@"break".block_inst = new_index;
+ } else if (ret_ref != .none) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(ret_ref));
+ }
astgen.extra.appendSliceAssumeCapacity(body);
astgen.extra.appendSliceAssumeCapacity(src_locs);
- // order is important when unstacking
+
+ // Order is important when unstacking.
if (args.body_gz) |body_gz| body_gz.unstack();
if (args.ret_gz) |ret_gz| ret_gz.unstack();
+ if (args.cc_gz) |cc_gz| cc_gz.unstack();
+ if (args.section_gz) |section_gz| section_gz.unstack();
+ if (args.addrspace_gz) |addrspace_gz| addrspace_gz.unstack();
+ if (args.align_gz) |align_gz| align_gz.unstack();
+
try gz.instructions.ensureUnusedCapacity(gpa, 1);
const tag: Zir.Inst.Tag = if (args.is_inferred_error) .func_inferred else .func;
- const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
- if (args.ret_br != 0) {
- astgen.instructions.items(.data)[args.ret_br].@"break".block_inst = new_index;
- }
astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
@@ -10077,6 +10233,12 @@ const GenZir = struct {
}
}
+ fn fancyFnExprExtraLen(body: []Zir.Inst.Index, ref: Zir.Inst.Ref) usize {
+ // In the case of non-empty body, there is one for the body length,
+ // and then one for each instruction.
+ return body.len + @boolToInt(ref != .none);
+ }
+
fn addVar(gz: *GenZir, args: struct {
align_inst: Zir.Inst.Ref,
lib_name: u32,
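
The fnDecl changes above apply one pattern per attribute (align, addrspace, linksection, callconv, return type): evaluate the expression into its own stacked sub-block, drop the body when the expression lowered to zero instructions (addFunc then stores only the resulting Ref, the len=0 encoding), and otherwise terminate the body with a break_inline that addFunc later retargets at the new func_fancy instruction. A condensed sketch of that shared shape, assuming AstGen's internal types; the helper name `evalAttr` and its `anytype` result-location parameter are assumptions for illustration, not part of this commit:

    fn evalAttr(
        attr_gz: *GenZir,   // stacked sub-block owning this attribute's body
        parent_gz: *GenZir, // e.g. decl_gz in fnDecl
        scope: *Scope,
        rl: anytype,        // result location, e.g. coerced_align_rl
        node: Ast.Node.Index,
    ) InnerError!Zir.Inst.Ref {
        if (node == 0) return .none;
        const inst = try expr(parent_gz, scope, rl, node);
        if (attr_gz.instructionsSlice().len == 0) {
            // Trivial expression: no body is emitted; addFunc stores only the Ref.
            return inst;
        }
        // Non-trivial expression: end the body with break_inline. addFunc later
        // patches this break's block_inst to point at the func_fancy instruction.
        _ = try attr_gz.addBreak(.break_inline, 0, inst);
        return inst;
    }

Each of the five attribute blocks in fnDecl is an inlined instance of this shape, and addFunc requires the sub-blocks stacked in the fixed order gz, align_gz, addrspace_gz, section_gz, cc_gz, ret_gz, body_gz, which is why fnDecl creates them in that order. fnProtoExpr and testDecl take the degenerate path, passing plain refs with null sub-blocks.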
diff --git a/src/Module.zig b/src/Module.zig
index 7c19c4dab6..f8d662ae1f 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -1595,9 +1595,9 @@ pub const Fn = struct {
switch (zir_tags[func.zir_body_inst]) {
.func => return false,
.func_inferred => return true,
- .func_extended => {
+ .func_fancy => {
const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node;
- const extra = zir.extraData(Zir.Inst.ExtendedFunc, inst_data.payload_index);
+ const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
return extra.data.bits.is_inferred_error;
},
else => unreachable,
diff --git a/src/Sema.zig b/src/Sema.zig
index fc64e17789..505810a158 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -747,7 +747,7 @@ fn analyzeBodyInner(
.field_call_bind => try sema.zirFieldCallBind(block, inst),
.func => try sema.zirFunc(block, inst, false),
.func_inferred => try sema.zirFunc(block, inst, true),
- .func_extended => try sema.zirFuncExtended(block, inst),
+ .func_fancy => try sema.zirFuncFancy(block, inst),
.import => try sema.zirImport(block, inst),
.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
.int => try sema.zirInt(block, inst),
@@ -5190,7 +5190,10 @@ fn analyzeCall(
// on parameters, we must now do the same for the return type as we just did with
// each of the parameters, resolving the return type and providing it to the child
// `Sema` so that it can be used for the `ret_ptr` instruction.
- const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst);
+ const ret_ty_inst = if (fn_info.ret_ty_body.len != 0)
+ try sema.resolveBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst)
+ else
+ try sema.resolveInst(fn_info.ret_ty_ref);
const ret_ty_src = func_src; // TODO better source location
const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
// Create a fresh inferred error set type for inline/comptime calls.
@@ -6506,9 +6509,34 @@ fn zirFunc(
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
+ const target = sema.mod.getTarget();
+ const ret_ty_src = inst_data.src(); // TODO better source location
+
var extra_index = extra.end;
- const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
+
+ const ret_ty: Type = switch (extra.data.ret_body_len) {
+ 0 => Type.void,
+ 1 => blk: {
+ const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| {
+ break :blk ret_ty;
+ } else |err| switch (err) {
+ error.GenericPoison => {
+ break :blk Type.initTag(.generic_poison);
+ },
+ else => |e| return e,
+ }
+ },
+ else => blk: {
+ const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
+ extra_index += ret_ty_body.len;
+
+ const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type);
+ var buffer: Value.ToTypeBuffer = undefined;
+ break :blk try ret_ty_val.toType(&buffer).copy(sema.arena);
+ },
+ };
var src_locs: Zir.Inst.Func.SrcLocs = undefined;
const has_body = extra.data.body_len != 0;
@@ -6526,9 +6554,11 @@ fn zirFunc(
block,
inst_data.src_node,
inst,
- ret_ty_body,
+ 0,
+ target_util.defaultAddressSpace(target, .function),
+ FuncLinkSection.default,
cc,
- Value.@"null",
+ ret_ty,
false,
inferred_error_set,
false,
@@ -6538,6 +6568,44 @@ fn zirFunc(
);
}
+// TODO this function and its callsites along with funcCommon need to be reworked
+// to handle when callconv, align, linksection, addrspace depend on comptime values
+// (thus triggering error.GenericPoison)
+fn resolveGenericBody(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ body: []const Zir.Inst.Index,
+ func_inst: Zir.Inst.Index,
+ dest_ty: Type,
+) !Value {
+ assert(body.len != 0);
+
+ const err = err: {
+ // Make sure any nested param instructions don't clobber our work.
+ const prev_params = block.params;
+ block.params = .{};
+ defer {
+ block.params.deinit(sema.gpa);
+ block.params = prev_params;
+ }
+ const uncasted = sema.resolveBody(block, body, func_inst) catch |err| break :err err;
+ const result = sema.coerce(block, dest_ty, uncasted, src) catch |err| break :err err;
+ const val = sema.resolveConstValue(block, src, result) catch |err| break :err err;
+ return val;
+ };
+ switch (err) {
+ error.GenericPoison => {
+ if (dest_ty.tag() == .type) {
+ return Value.initTag(.generic_poison_type);
+ } else {
+ return Value.initTag(.generic_poison);
+ }
+ },
+ else => |e| return e,
+ }
+}
+
/// Given a library name, examines if the library name should end up in
/// `link.File.Options.system_libs` table (for example, libc is always
/// specified via dedicated flag `link.File.Options.link_libc` instead),
@@ -6601,14 +6669,27 @@ fn handleExternLibName(
return sema.gpa.dupeZ(u8, lib_name);
}
+const FuncLinkSection = union(enum) {
+ generic,
+ default,
+ explicit: [*:0]const u8,
+};
+
fn funcCommon(
sema: *Sema,
block: *Block,
src_node_offset: i32,
func_inst: Zir.Inst.Index,
- ret_ty_body: []const Zir.Inst.Index,
- cc: std.builtin.CallingConvention,
- align_val: Value,
+ /// null means generic poison
+ alignment: ?u32,
+ /// null means generic poison
+ address_space: ?std.builtin.AddressSpace,
+ /// outer null means generic poison; inner null means default link section
+ section: FuncLinkSection,
+ /// null means generic poison
+ cc: ?std.builtin.CallingConvention,
+ /// this might be Type.generic_poison
+ bare_return_type: Type,
var_args: bool,
inferred_error_set: bool,
is_extern: bool,
@@ -6618,42 +6699,15 @@ fn funcCommon(
) CompileError!Air.Inst.Ref {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
- // The return type body might be a type expression that depends on generic parameters.
- // In such case we need to use a generic_poison value for the return type and mark
- // the function as generic.
- var is_generic = false;
- const bare_return_type: Type = ret_ty: {
- if (ret_ty_body.len == 0) break :ret_ty Type.void;
-
- const err = err: {
- // Make sure any nested param instructions don't clobber our work.
- const prev_params = block.params;
- block.params = .{};
- defer {
- block.params.deinit(sema.gpa);
- block.params = prev_params;
- }
- if (sema.resolveBody(block, ret_ty_body, func_inst)) |ret_ty_inst| {
- if (sema.analyzeAsType(block, ret_ty_src, ret_ty_inst)) |ret_ty| {
- break :ret_ty ret_ty;
- } else |err| break :err err;
- } else |err| break :err err;
- // Check for generic params.
- for (block.params.items) |param| {
- if (param.ty.tag() == .generic_poison) is_generic = true;
- }
- };
- switch (err) {
- error.GenericPoison => {
- // The type is not available until the generic instantiation.
- is_generic = true;
- break :ret_ty Type.initTag(.generic_poison);
- },
- else => |e| return e,
- }
- };
-
- const mod = sema.mod;
+ var is_generic = bare_return_type.tag() == .generic_poison or
+ alignment == null or
+ address_space == null or
+ section == .generic or
+ cc == null;
+ // Check for generic params.
+ for (block.params.items) |param| {
+ if (param.ty.tag() == .generic_poison) is_generic = true;
+ }
const new_func: *Module.Fn = new_func: {
if (!has_body) break :new_func undefined;
@@ -6670,36 +6724,28 @@ fn funcCommon(
errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node);
// Note: no need to errdefer since this will still be in its default state at the end of the function.
- const target = mod.getTarget();
-
+ const target = sema.mod.getTarget();
const fn_ty: Type = fn_ty: {
- const alignment: u32 = if (align_val.tag() == .null_value) 0 else a: {
- const alignment = @intCast(u32, align_val.toUnsignedInt(target));
- if (alignment == target_util.defaultFunctionAlignment(target)) {
- break :a 0;
- } else {
- break :a alignment;
- }
- };
-
// Hot path for some common function types.
// TODO can we eliminate some of these Type tag values? seems unnecessarily complicated.
- if (!is_generic and block.params.items.len == 0 and !var_args and
- alignment == 0 and !inferred_error_set)
+ if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and
+ alignment.? == 0 and
+ address_space.? == target_util.defaultAddressSpace(target, .function) and
+ section == .default)
{
- if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) {
+ if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) {
break :fn_ty Type.initTag(.fn_noreturn_no_args);
}
- if (bare_return_type.zigTypeTag() == .Void and cc == .Unspecified) {
+ if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) {
break :fn_ty Type.initTag(.fn_void_no_args);
}
- if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Naked) {
+ if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) {
break :fn_ty Type.initTag(.fn_naked_noreturn_no_args);
}
- if (bare_return_type.zigTypeTag() == .Void and cc == .C) {
+ if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) {
break :fn_ty Type.initTag(.fn_ccc_void_no_args);
}
}
@@ -6745,17 +6791,35 @@ fn funcCommon(
});
};
+ // stage1 bug workaround
+ const cc_workaround = cc orelse undefined;
+ const align_workaround = alignment orelse @as(u32, undefined);
+
break :fn_ty try Type.Tag.function.create(sema.arena, .{
.param_types = param_types,
.comptime_params = comptime_params.ptr,
.return_type = return_type,
- .cc = cc,
- .alignment = alignment,
+ .cc = cc_workaround,
+ .cc_is_generic = cc == null,
+ .alignment = align_workaround,
+ .align_is_generic = alignment == null,
+ .section_is_generic = section == .generic,
+ .addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = is_generic,
});
};
+ if (sema.owner_decl.owns_tv) {
+ switch (section) {
+ .generic => sema.owner_decl.@"linksection" = undefined,
+ .default => sema.owner_decl.@"linksection" = null,
+ .explicit => |s| sema.owner_decl.@"linksection" = s,
+ }
+ if (alignment) |a| sema.owner_decl.@"align" = a;
+ if (address_space) |a| sema.owner_decl.@"addrspace" = a;
+ }
+
if (is_extern) {
const new_extern_fn = try sema.gpa.create(Module.ExternFn);
errdefer sema.gpa.destroy(new_extern_fn);
@@ -16750,16 +16814,20 @@ fn zirVarExtended(
return result;
}
-fn zirFuncExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(Zir.Inst.ExtendedFunc, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
+ const target = sema.mod.getTarget();
- const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = inst_data.src_node };
const align_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at align
+ const addrspace_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at addrspace
+ const section_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at section
+ const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = inst_data.src_node };
+ const ret_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at the return type
var extra_index: usize = extra.end;
@@ -16769,22 +16837,135 @@ fn zirFuncExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
break :blk lib_name;
} else null;
- const cc: std.builtin.CallingConvention = if (extra.data.bits.has_cc) blk: {
+ const @"align": ?u32 = if (extra.data.bits.has_align_body) blk: {
+ const body_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const body = sema.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u16);
+ if (val.tag() == .generic_poison) {
+ break :blk null;
+ }
+ const alignment = @intCast(u32, val.toUnsignedInt(target));
+ if (alignment == target_util.defaultFunctionAlignment(target)) {
+ break :blk 0;
+ } else {
+ break :blk alignment;
+ }
+ } else if (extra.data.bits.has_align_ref) blk: {
+ const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ const align_tv = sema.resolveInstConst(block, align_src, align_ref) catch |err| switch (err) {
+ error.GenericPoison => {
+ break :blk null;
+ },
+ else => |e| return e,
+ };
+ const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target));
+ if (alignment == target_util.defaultFunctionAlignment(target)) {
+ break :blk 0;
+ } else {
+ break :blk alignment;
+ }
+ } else 0;
+
+ const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
+ const body_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const body = sema.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ const addrspace_ty = try sema.getBuiltinType(block, addrspace_src, "AddressSpace");
+ const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty);
+ if (val.tag() == .generic_poison) {
+ break :blk null;
+ }
+ break :blk val.toEnum(std.builtin.AddressSpace);
+ } else if (extra.data.bits.has_addrspace_ref) blk: {
+ const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref) catch |err| switch (err) {
+ error.GenericPoison => {
+ break :blk null;
+ },
+ else => |e| return e,
+ };
+ break :blk addrspace_tv.val.toEnum(std.builtin.AddressSpace);
+ } else target_util.defaultAddressSpace(target, .function);
+
+ const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: {
+ const body_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const body = sema.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ const val = try sema.resolveGenericBody(block, section_src, body, inst, Type.initTag(.const_slice_u8));
+ if (val.tag() == .generic_poison) {
+ break :blk FuncLinkSection{ .generic = {} };
+ }
+ _ = val;
+ return sema.fail(block, section_src, "TODO implement linksection on functions", .{});
+ } else if (extra.data.bits.has_section_ref) blk: {
+ const section_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ const section_tv = sema.resolveInstConst(block, section_src, section_ref) catch |err| switch (err) {
+ error.GenericPoison => {
+ break :blk FuncLinkSection{ .generic = {} };
+ },
+ else => |e| return e,
+ };
+ _ = section_tv;
+ return sema.fail(block, section_src, "TODO implement linksection on functions", .{});
+ } else FuncLinkSection{ .default = {} };
+
+ const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
+ const body_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const body = sema.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ const cc_ty = try sema.getBuiltinType(block, addrspace_src, "CallingConvention");
+ const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty);
+ if (val.tag() == .generic_poison) {
+ break :blk null;
+ }
+ break :blk val.toEnum(std.builtin.CallingConvention);
+ } else if (extra.data.bits.has_cc_ref) blk: {
const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
- const cc_tv = try sema.resolveInstConst(block, cc_src, cc_ref);
+ const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref) catch |err| switch (err) {
+ error.GenericPoison => {
+ break :blk null;
+ },
+ else => |e| return e,
+ };
break :blk cc_tv.val.toEnum(std.builtin.CallingConvention);
- } else .Unspecified;
+ } else std.builtin.CallingConvention.Unspecified;
- const align_val: Value = if (extra.data.bits.has_align) blk: {
- const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
+ const body_len = sema.code.extra[extra_index];
extra_index += 1;
- const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
- break :blk align_tv.val;
- } else Value.@"null";
+ const body = sema.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
- const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
+ const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type);
+ var buffer: Value.ToTypeBuffer = undefined;
+ const ty = try val.toType(&buffer).copy(sema.arena);
+ break :blk ty;
+ } else if (extra.data.bits.has_ret_ty_ref) blk: {
+ const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref) catch |err| switch (err) {
+ error.GenericPoison => {
+ break :blk Type.initTag(.generic_poison);
+ },
+ else => |e| return e,
+ };
+ var buffer: Value.ToTypeBuffer = undefined;
+ const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena);
+ break :blk ty;
+ } else Type.void;
var src_locs: Zir.Inst.Func.SrcLocs = undefined;
const has_body = extra.data.body_len != 0;
@@ -16801,9 +16982,11 @@ fn zirFuncExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
block,
inst_data.src_node,
inst,
- ret_ty_body,
+ @"align",
+ @"addrspace",
+ @"linksection",
cc,
- align_val,
+ ret_ty,
is_var_args,
is_inferred_error,
is_extern,
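
The Sema side mirrors that encoding: for each attribute, zirFuncFancy either resolves a trailing body through resolveGenericBody (which converts error.GenericPoison into a poison value) or resolves a single trailing Ref, and a generic-dependent attribute is represented as null (or .generic for linksection) so funcCommon can mark the function type as generic. A minimal, illustrative-only restatement of the align case outside its surrounding control flow; `body`, `align_src`, `inst`, and `target` are the locals of zirFuncFancy above:

    const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u16);
    const alignment: ?u32 = if (val.tag() == .generic_poison)
        null // unknown until generic instantiation; funcCommon records align_is_generic
    else blk: {
        const a = @intCast(u32, val.toUnsignedInt(target));
        // A value equal to the target's default function alignment is normalized to 0.
        break :blk if (a == target_util.defaultFunctionAlignment(target)) 0 else a;
    };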
diff --git a/src/Zir.zig b/src/Zir.zig
index 7516a5a873..040f54cc39 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -74,7 +74,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]),
Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
- Inst.ExtendedFunc.Bits => @bitCast(Inst.ExtendedFunc.Bits, code.extra[i]),
+ Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@@ -424,8 +424,8 @@ pub const Inst = struct {
func_inferred,
/// Represents a function declaration or function prototype, depending on
/// whether body_len is 0.
- /// Uses the `pl_node` union field. `payload_index` points to a `ExtendedFunc`.
- func_extended,
+ /// Uses the `pl_node` union field. `payload_index` points to a `FuncFancy`.
+ func_fancy,
/// Implements the `@import` builtin.
/// Uses the `str_tok` field.
import,
@@ -1062,7 +1062,7 @@ pub const Inst = struct {
.field_val_named,
.func,
.func_inferred,
- .func_extended,
+ .func_fancy,
.has_decl,
.int,
.int_big,
@@ -1346,7 +1346,7 @@ pub const Inst = struct {
.field_val_named,
.func,
.func_inferred,
- .func_extended,
+ .func_fancy,
.has_decl,
.int,
.int_big,
@@ -1599,7 +1599,7 @@ pub const Inst = struct {
.field_call_bind = .pl_node,
.func = .pl_node,
.func_inferred = .pl_node,
- .func_extended = .pl_node,
+ .func_fancy = .pl_node,
.import = .str_tok,
.int = .int,
.int_big = .str,
@@ -2606,29 +2606,100 @@ pub const Inst = struct {
};
/// Trailing:
- /// 0. lib_name: u32, // null terminated string index, if has_lib_name is set
- /// 1. cc: Ref, // if has_cc is set
- /// 2. align: Ref, // if has_align is set
- /// 3. return_type: Index // for each ret_body_len
- /// 4. body: Index // for each body_len
- /// 5. src_locs: Func.SrcLocs // if body_len != 0
- pub const ExtendedFunc = struct {
+ /// if (ret_body_len == 1) {
+ /// 0. return_type: Ref
+ /// }
+ /// if (ret_body_len > 1) {
+ /// 1. return_type: Index // for each ret_body_len
+ /// }
+ /// 2. body: Index // for each body_len
+ /// 3. src_locs: SrcLocs // if body_len != 0
+ pub const Func = struct {
/// If this is 0 it means a void return type.
+ /// If this is 1 it means return_type is a simple Ref
ret_body_len: u32,
/// Points to the block that contains the param instructions for this function.
param_block: Index,
body_len: u32,
+
+ pub const SrcLocs = struct {
+ /// Line index in the source file relative to the parent decl.
+ lbrace_line: u32,
+ /// Line index in the source file relative to the parent decl.
+ rbrace_line: u32,
+ /// lbrace_column is least significant bits u16
+ /// rbrace_column is most significant bits u16
+ columns: u32,
+ };
+ };
+
+ /// Trailing:
+ /// 0. lib_name: u32, // null terminated string index, if has_lib_name is set
+ /// if (has_align_ref and !has_align_body) {
+ /// 1. align: Ref,
+ /// }
+ /// if (has_align_body) {
+ /// 2. align_body_len: u32
+ /// 3. align_body: u32 // for each align_body_len
+ /// }
+ /// if (has_addrspace_ref and !has_addrspace_body) {
+ /// 4. addrspace: Ref,
+ /// }
+ /// if (has_addrspace_body) {
+ /// 5. addrspace_body_len: u32
+ /// 6. addrspace_body: u32 // for each addrspace_body_len
+ /// }
+ /// if (has_section_ref and !has_section_body) {
+ /// 7. section: Ref,
+ /// }
+ /// if (has_section_body) {
+ /// 8. section_body_len: u32
+ /// 9. section_body: u32 // for each section_body_len
+ /// }
+ /// if (has_cc_ref and !has_cc_body) {
+ /// 10. cc: Ref,
+ /// }
+ /// if (has_cc_body) {
+ /// 11. cc_body_len: u32
+ /// 12. cc_body: u32 // for each cc_body_len
+ /// }
+ /// if (has_ret_ty_ref and !has_ret_ty_body) {
+ /// 13. ret_ty: Ref,
+ /// }
+ /// if (has_ret_ty_body) {
+ /// 14. ret_ty_body_len: u32
+ /// 15. ret_ty_body: u32 // for each ret_ty_body_len
+ /// }
+ /// 16. body: Index // for each body_len
+ /// 17. src_locs: Func.SrcLocs // if body_len != 0
+ pub const FuncFancy = struct {
+ /// Points to the block that contains the param instructions for this function.
+ param_block: Index,
+ body_len: u32,
bits: Bits,
+ /// If both has_cc_ref and has_cc_body are false, it means auto calling convention.
+ /// If both has_align_ref and has_align_body are false, it means default alignment.
+ /// If both has_ret_ty_ref and has_ret_ty_body are false, it means void return type.
+ /// If both has_section_ref and has_section_body are false, it means default section.
+ /// If both has_addrspace_ref and has_addrspace_body are false, it means default addrspace.
pub const Bits = packed struct {
is_var_args: bool,
is_inferred_error: bool,
- has_lib_name: bool,
- has_cc: bool,
- has_align: bool,
is_test: bool,
is_extern: bool,
- _: u25 = undefined,
+ has_align_ref: bool,
+ has_align_body: bool,
+ has_addrspace_ref: bool,
+ has_addrspace_body: bool,
+ has_section_ref: bool,
+ has_section_body: bool,
+ has_cc_ref: bool,
+ has_cc_body: bool,
+ has_ret_ty_ref: bool,
+ has_ret_ty_body: bool,
+ has_lib_name: bool,
+ _: u17 = undefined,
};
};
@@ -2650,28 +2721,6 @@ pub const Inst = struct {
};
};
- /// Trailing:
- /// 0. return_type: Index // for each ret_body_len
- /// 1. body: Index // for each body_len
- /// 2. src_locs: SrcLocs // if body_len != 0
- pub const Func = struct {
- /// If this is 0 it means a void return type.
- ret_body_len: u32,
- /// Points to the block that contains the param instructions for this function.
- param_block: Index,
- body_len: u32,
-
- pub const SrcLocs = struct {
- /// Line index in the source file relative to the parent decl.
- lbrace_line: u32,
- /// Line index in the source file relative to the parent decl.
- rbrace_line: u32,
- /// lbrace_column is least significant bits u16
- /// rbrace_column is most significant bits u16
- columns: u32,
- };
- };
-
/// This data is stored inside extra, with trailing operands according to `operands_len`.
/// Each operand is a `Ref`.
pub const MultiOp = struct {
@@ -3473,7 +3522,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
switch (tags[decl_inst]) {
// Functions are allowed and yield no iterations.
// There is one case matching this in the extended instruction set below.
- .func, .func_inferred, .func_extended => return declIteratorInner(zir, 0, 0),
+ .func, .func_inferred, .func_fancy => return declIteratorInner(zir, 0, 0),
.extended => {
const extended = datas[decl_inst].extended;
@@ -3579,18 +3628,77 @@ fn findDeclsInner(
const inst_data = datas[inst].pl_node;
const extra = zir.extraData(Inst.Func, inst_data.payload_index);
- const body = zir.extra[extra.end..][0..extra.data.body_len];
+ var extra_index: usize = extra.end;
+ switch (extra.data.ret_body_len) {
+ 0 => {},
+ 1 => extra_index += 1,
+ else => {
+ const body = zir.extra[extra_index..][0..extra.data.ret_body_len];
+ extra_index += body.len;
+ try zir.findDeclsBody(list, body);
+ },
+ }
+ const body = zir.extra[extra_index..][0..extra.data.body_len];
return zir.findDeclsBody(list, body);
},
- .func_extended => {
+ .func_fancy => {
try list.append(inst);
const inst_data = datas[inst].pl_node;
- const extra = zir.extraData(Inst.ExtendedFunc, inst_data.payload_index);
+ const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
var extra_index: usize = extra.end;
extra_index += @boolToInt(extra.data.bits.has_lib_name);
- extra_index += @boolToInt(extra.data.bits.has_cc);
- extra_index += @boolToInt(extra.data.bits.has_align);
+
+ if (extra.data.bits.has_align_body) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ try zir.findDeclsBody(list, body);
+ extra_index += body.len;
+ } else if (extra.data.bits.has_align_ref) {
+ extra_index += 1;
+ }
+
+ if (extra.data.bits.has_addrspace_body) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ try zir.findDeclsBody(list, body);
+ extra_index += body.len;
+ } else if (extra.data.bits.has_addrspace_ref) {
+ extra_index += 1;
+ }
+
+ if (extra.data.bits.has_section_body) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ try zir.findDeclsBody(list, body);
+ extra_index += body.len;
+ } else if (extra.data.bits.has_section_ref) {
+ extra_index += 1;
+ }
+
+ if (extra.data.bits.has_cc_body) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ try zir.findDeclsBody(list, body);
+ extra_index += body.len;
+ } else if (extra.data.bits.has_cc_ref) {
+ extra_index += 1;
+ }
+
+ if (extra.data.bits.has_ret_ty_body) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ try zir.findDeclsBody(list, body);
+ extra_index += body.len;
+ } else if (extra.data.bits.has_ret_ty_ref) {
+ extra_index += 1;
+ }
+
const body = zir.extra[extra_index..][0..extra.data.body_len];
return zir.findDeclsBody(list, body);
},
@@ -3715,6 +3823,7 @@ pub const FnInfo = struct {
param_body_inst: Inst.Index,
ret_ty_body: []const Inst.Index,
body: []const Inst.Index,
+ ret_ty_ref: Zir.Inst.Ref,
total_params_len: u32,
};
@@ -3724,38 +3833,84 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
const info: struct {
param_block: Inst.Index,
body: []const Inst.Index,
+ ret_ty_ref: Inst.Ref,
ret_ty_body: []const Inst.Index,
} = switch (tags[fn_inst]) {
.func, .func_inferred => blk: {
const inst_data = datas[fn_inst].pl_node;
const extra = zir.extraData(Inst.Func, inst_data.payload_index);
+
var extra_index: usize = extra.end;
+ var ret_ty_ref: Inst.Ref = .none;
+ var ret_ty_body: []const Inst.Index = &.{};
- const ret_ty_body = zir.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
+ switch (extra.data.ret_body_len) {
+ 0 => {
+ ret_ty_ref = .void_type;
+ },
+ 1 => {
+ ret_ty_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]);
+ extra_index += 1;
+ },
+ else => {
+ ret_ty_body = zir.extra[extra_index..][0..extra.data.ret_body_len];
+ extra_index += ret_ty_body.len;
+ },
+ }
const body = zir.extra[extra_index..][0..extra.data.body_len];
extra_index += body.len;
break :blk .{
.param_block = extra.data.param_block,
+ .ret_ty_ref = ret_ty_ref,
.ret_ty_body = ret_ty_body,
.body = body,
};
},
- .func_extended => blk: {
+ .func_fancy => blk: {
const inst_data = datas[fn_inst].pl_node;
- const extra = zir.extraData(Inst.ExtendedFunc, inst_data.payload_index);
+ const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
+
var extra_index: usize = extra.end;
+ var ret_ty_ref: Inst.Ref = .void_type;
+ var ret_ty_body: []const Inst.Index = &.{};
+
extra_index += @boolToInt(extra.data.bits.has_lib_name);
- extra_index += @boolToInt(extra.data.bits.has_cc);
- extra_index += @boolToInt(extra.data.bits.has_align);
- const ret_ty_body = zir.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
+ if (extra.data.bits.has_align_body) {
+ extra_index += zir.extra[extra_index] + 1;
+ } else if (extra.data.bits.has_align_ref) {
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_addrspace_body) {
+ extra_index += zir.extra[extra_index] + 1;
+ } else if (extra.data.bits.has_addrspace_ref) {
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_section_body) {
+ extra_index += zir.extra[extra_index] + 1;
+ } else if (extra.data.bits.has_section_ref) {
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_cc_body) {
+ extra_index += zir.extra[extra_index] + 1;
+ } else if (extra.data.bits.has_cc_ref) {
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_ret_ty_body) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ ret_ty_body = zir.extra[extra_index..][0..body_len];
+ extra_index += ret_ty_body.len;
+ } else if (extra.data.bits.has_ret_ty_ref) {
+ ret_ty_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]);
+ extra_index += 1;
+ }
const body = zir.extra[extra_index..][0..extra.data.body_len];
extra_index += body.len;
break :blk .{
.param_block = extra.data.param_block,
+ .ret_ty_ref = ret_ty_ref,
.ret_ty_body = ret_ty_body,
.body = body,
};
@@ -3778,6 +3933,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
.param_body = param_body,
.param_body_inst = info.param_block,
.ret_ty_body = info.ret_ty_body,
+ .ret_ty_ref = info.ret_ty_ref,
.body = info.body,
.total_params_len = total_params_len,
};
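
As a concrete reading of the FuncFancy layout above, consider a hypothetical declaration (not taken from real ZIR output) whose align expression needs a two-instruction body, whose return type is the plain ref u32_type, and which has a non-empty function body, with no lib name, addrspace, section, or callconv:

    bits: has_align_body and has_ret_ty_ref set, all other has_* bits clear

    extra[payload_index..]:
        param_block, body_len, bits          (the FuncFancy fields themselves)
        align_body_len = 2
        align_body[0], align_body[1]         (the last is a break_inline whose
                                              block_inst points back at func_fancy)
        ret_ty = Ref.u32_type                (single word, no length prefix)
        body[0] .. body[body_len - 1]
        src_locs                             (3 words; present because body_len != 0)

getFnInfo and findDeclsInner above walk exactly this layout, using the has_*_body bits to decide whether the next word is a length prefix or a bare Ref.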
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6bab2e5628..30098f5372 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -426,7 +426,7 @@ const Writer = struct {
.func => try self.writeFunc(stream, inst, false),
.func_inferred => try self.writeFunc(stream, inst, true),
- .func_extended => try self.writeFuncExtended(stream, inst),
+ .func_fancy => try self.writeFuncFancy(stream, inst),
.@"unreachable" => try self.writeUnreachable(stream, inst),
@@ -1915,10 +1915,24 @@ const Writer = struct {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = self.code.extraData(Zir.Inst.Func, inst_data.payload_index);
+
var extra_index = extra.end;
+ var ret_ty_ref: Zir.Inst.Ref = .none;
+ var ret_ty_body: []const Zir.Inst.Index = &.{};
- const ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
+ switch (extra.data.ret_body_len) {
+ 0 => {
+ ret_ty_ref = .void_type;
+ },
+ 1 => {
+ ret_ty_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ },
+ else => {
+ ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
+ extra_index += ret_ty_body.len;
+ },
+ }
const body = self.code.extra[extra_index..][0..extra.data.body_len];
extra_index += body.len;
@@ -1929,43 +1943,96 @@ const Writer = struct {
}
return self.writeFuncCommon(
stream,
- ret_ty_body,
inferred_error_set,
false,
false,
+
.none,
+ &.{},
.none,
+ &.{},
+ .none,
+ &.{},
+ .none,
+ &.{},
+ ret_ty_ref,
+ ret_ty_body,
+
body,
src,
src_locs,
);
}
- fn writeFuncExtended(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ fn writeFuncFancy(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Zir.Inst.ExtendedFunc, inst_data.payload_index);
+ const extra = self.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
const src = inst_data.src();
var extra_index: usize = extra.end;
+ var align_ref: Zir.Inst.Ref = .none;
+ var align_body: []const Zir.Inst.Index = &.{};
+ var addrspace_ref: Zir.Inst.Ref = .none;
+ var addrspace_body: []const Zir.Inst.Index = &.{};
+ var section_ref: Zir.Inst.Ref = .none;
+ var section_body: []const Zir.Inst.Index = &.{};
+ var cc_ref: Zir.Inst.Ref = .none;
+ var cc_body: []const Zir.Inst.Index = &.{};
+ var ret_ty_ref: Zir.Inst.Ref = .none;
+ var ret_ty_body: []const Zir.Inst.Index = &.{};
+
if (extra.data.bits.has_lib_name) {
const lib_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
extra_index += 1;
try stream.print("lib_name=\"{}\", ", .{std.zig.fmtEscapes(lib_name)});
}
try self.writeFlag(stream, "test, ", extra.data.bits.is_test);
- const cc: Zir.Inst.Ref = if (!extra.data.bits.has_cc) .none else blk: {
- const cc = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+
+ if (extra.data.bits.has_align_body) {
+ const body_len = self.code.extra[extra_index];
extra_index += 1;
- break :blk cc;
- };
- const align_inst: Zir.Inst.Ref = if (!extra.data.bits.has_align) .none else blk: {
- const align_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ align_body = self.code.extra[extra_index..][0..body_len];
+ extra_index += align_body.len;
+ } else if (extra.data.bits.has_align_ref) {
+ align_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
extra_index += 1;
- break :blk align_inst;
- };
-
- const ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
+ }
+ if (extra.data.bits.has_addrspace_body) {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ addrspace_body = self.code.extra[extra_index..][0..body_len];
+ extra_index += addrspace_body.len;
+ } else if (extra.data.bits.has_addrspace_ref) {
+ addrspace_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_section_body) {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ section_body = self.code.extra[extra_index..][0..body_len];
+ extra_index += section_body.len;
+ } else if (extra.data.bits.has_section_ref) {
+ section_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_cc_body) {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ cc_body = self.code.extra[extra_index..][0..body_len];
+ extra_index += cc_body.len;
+ } else if (extra.data.bits.has_cc_ref) {
+ cc_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ }
+ if (extra.data.bits.has_ret_ty_body) {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ ret_ty_body = self.code.extra[extra_index..][0..body_len];
+ extra_index += ret_ty_body.len;
+ } else if (extra.data.bits.has_ret_ty_ref) {
+ ret_ty_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ }
const body = self.code.extra[extra_index..][0..extra.data.body_len];
extra_index += body.len;
@@ -1976,12 +2043,19 @@ const Writer = struct {
}
return self.writeFuncCommon(
stream,
- ret_ty_body,
extra.data.bits.is_inferred_error,
extra.data.bits.is_var_args,
extra.data.bits.is_extern,
- cc,
- align_inst,
+ align_ref,
+ align_body,
+ addrspace_ref,
+ addrspace_body,
+ section_ref,
+ section_body,
+ cc_ref,
+ cc_body,
+ ret_ty_ref,
+ ret_ty_body,
body,
src,
src_locs,
@@ -2126,30 +2200,33 @@ const Writer = struct {
fn writeFuncCommon(
self: *Writer,
stream: anytype,
- ret_ty_body: []const Zir.Inst.Index,
inferred_error_set: bool,
var_args: bool,
is_extern: bool,
- cc: Zir.Inst.Ref,
- align_inst: Zir.Inst.Ref,
+ align_ref: Zir.Inst.Ref,
+ align_body: []const Zir.Inst.Index,
+ addrspace_ref: Zir.Inst.Ref,
+ addrspace_body: []const Zir.Inst.Index,
+ section_ref: Zir.Inst.Ref,
+ section_body: []const Zir.Inst.Index,
+ cc_ref: Zir.Inst.Ref,
+ cc_body: []const Zir.Inst.Index,
+ ret_ty_ref: Zir.Inst.Ref,
+ ret_ty_body: []const Zir.Inst.Index,
body: []const Zir.Inst.Index,
src: LazySrcLoc,
src_locs: Zir.Inst.Func.SrcLocs,
) !void {
- if (ret_ty_body.len == 0) {
- try stream.writeAll("ret_ty=void");
- } else {
- try stream.writeAll("ret_ty=");
- try self.writeBracedBody(stream, ret_ty_body);
- }
-
- try self.writeOptionalInstRef(stream, ", cc=", cc);
- try self.writeOptionalInstRef(stream, ", align=", align_inst);
- try self.writeFlag(stream, ", vargs", var_args);
- try self.writeFlag(stream, ", extern", is_extern);
- try self.writeFlag(stream, ", inferror", inferred_error_set);
-
- try stream.writeAll(", body=");
+ try self.writeOptionalInstRefOrBody(stream, "align=", align_ref, align_body);
+ try self.writeOptionalInstRefOrBody(stream, "addrspace=", addrspace_ref, addrspace_body);
+ try self.writeOptionalInstRefOrBody(stream, "section=", section_ref, section_body);
+ try self.writeOptionalInstRefOrBody(stream, "cc=", cc_ref, cc_body);
+ try self.writeOptionalInstRefOrBody(stream, "ret_ty=", ret_ty_ref, ret_ty_body);
+ try self.writeFlag(stream, "vargs, ", var_args);
+ try self.writeFlag(stream, "extern, ", is_extern);
+ try self.writeFlag(stream, "inferror, ", inferred_error_set);
+
+ try stream.writeAll("body=");
try self.writeBracedBody(stream, body);
try stream.writeAll(") ");
if (body.len != 0) {
@@ -2199,6 +2276,24 @@ const Writer = struct {
try self.writeInstRef(stream, inst);
}
+ fn writeOptionalInstRefOrBody(
+ self: *Writer,
+ stream: anytype,
+ prefix: []const u8,
+ ref: Zir.Inst.Ref,
+ body: []const Zir.Inst.Index,
+ ) !void {
+ if (body.len != 0) {
+ try stream.writeAll(prefix);
+ try self.writeBracedBody(stream, body);
+ try stream.writeAll(", ");
+ } else if (ref != .none) {
+ try stream.writeAll(prefix);
+ try self.writeInstRef(stream, ref);
+ try stream.writeAll(", ");
+ }
+ }
+
fn writeFlag(
self: *Writer,
stream: anytype,
diff --git a/src/type.zig b/src/type.zig
index 145ae4904a..ebb8bfd7c3 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -6120,6 +6120,10 @@ pub const Type = extern union {
cc: std.builtin.CallingConvention,
is_var_args: bool,
is_generic: bool,
+ align_is_generic: bool = false,
+ cc_is_generic: bool = false,
+ section_is_generic: bool = false,
+ addrspace_is_generic: bool = false,
pub fn paramIsComptime(self: @This(), i: usize) bool {
assert(i < self.param_types.len);
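
The new *_is_generic flags on the function payload are what funcCommon sets from its nullable parameters: an attribute that depends on a comptime parameter keeps an undefined placeholder value (the "stage1 bug workaround" above) and records the corresponding flag instead. A sketch of such a creation, mirroring the Type.Tag.function.create call in funcCommon for a function whose calling convention is generic; the names are funcCommon's locals and this is illustrative, not new API:

    const fn_ty = try Type.Tag.function.create(sema.arena, .{
        .param_types = param_types,
        .comptime_params = comptime_params.ptr,
        .return_type = return_type,
        .cc = undefined,          // placeholder; not meaningful while cc_is_generic is set
        .cc_is_generic = true,
        .alignment = alignment.?, // concrete in this example
        .align_is_generic = false,
        .section_is_generic = false,
        .addrspace_is_generic = false,
        .is_var_args = false,
        .is_generic = true,       // any generic attribute makes the whole fn generic
    });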