Diffstat (limited to 'src')
-rw-r--r--  src/Air.zig                       56
-rw-r--r--  src/AstGen.zig                   297
-rw-r--r--  src/BuiltinFn.zig                 16
-rw-r--r--  src/Compilation.zig              103
-rw-r--r--  src/Liveness.zig                  11
-rw-r--r--  src/Module.zig                   169
-rw-r--r--  src/Sema.zig                    2719
-rw-r--r--  src/Zir.zig                      294
-rw-r--r--  src/arch/aarch64/CodeGen.zig      50
-rw-r--r--  src/clang.zig                      3
-rw-r--r--  src/codegen.zig                   60
-rw-r--r--  src/codegen/c.zig                 68
-rw-r--r--  src/codegen/llvm.zig             694
-rw-r--r--  src/codegen/llvm/bindings.zig     30
-rw-r--r--  src/codegen/spirv.zig              1
-rw-r--r--  src/codegen/wasm.zig              12
-rw-r--r--  src/config.zig.in                  1
-rw-r--r--  src/link.zig                      19
-rw-r--r--  src/link/C/zig.h                   2
-rw-r--r--  src/link/Coff.zig                  4
-rw-r--r--  src/link/MachO.zig               431
-rw-r--r--  src/link/MachO/Atom.zig            8
-rw-r--r--  src/link/MachO/Object.zig         16
-rw-r--r--  src/main.zig                      22
-rw-r--r--  src/print_air.zig                 66
-rw-r--r--  src/print_zir.zig                127
-rw-r--r--  src/stage1/codegen.cpp            28
-rw-r--r--  src/translate_c.zig               49
-rw-r--r--  src/translate_c/ast.zig            2
-rw-r--r--  src/type.zig                     323
-rw-r--r--  src/value.zig                    453
-rw-r--r--  src/zig_clang.cpp                  5
-rw-r--r--  src/zig_clang.h                    1
33 files changed, 4161 insertions(+), 1979 deletions(-)
diff --git a/src/Air.zig b/src/Air.zig
index 86e16487bb..d39a78f1ad 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -80,11 +80,27 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
mul_sat,
- /// Integer or float division. For integers, wrapping is undefined behavior.
+ /// Float division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
- div,
+ div_float,
+ /// Truncating integer or float division. For integers, wrapping is undefined behavior.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ div_trunc,
+ /// Flooring integer or float division. For integers, wrapping is undefined behavior.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ div_floor,
+ /// Integer or float division. Guaranteed no remainder.
+ /// For integers, wrapping is undefined behavior.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ div_exact,
/// Integer or float remainder division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
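For reference, the four division instructions map onto constructs that already exist at the Zig language level: `/` on floats, plus the `@divTrunc`, `@divFloor`, and `@divExact` builtins. A minimal sketch of the surface semantics (not part of this commit):

```zig
const std = @import("std");

test "the four division flavors" {
    // div_float: plain `/` on floats.
    try std.testing.expectEqual(@as(f64, 3.5), 7.0 / 2.0);
    // div_trunc: rounds toward zero.
    try std.testing.expectEqual(@as(i32, -3), @divTrunc(@as(i32, -7), 2));
    // div_floor: rounds toward negative infinity.
    try std.testing.expectEqual(@as(i32, -4), @divFloor(@as(i32, -7), 2));
    // div_exact: the caller guarantees there is no remainder.
    try std.testing.expectEqual(@as(i32, 4), @divExact(@as(i32, 8), 2));
}
```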
@@ -360,6 +376,9 @@ pub const Inst = struct {
/// Given a tagged union value, get its tag value.
/// Uses the `ty_op` field.
get_union_tag,
+ /// Constructs a slice from a pointer and a length.
+ /// Uses the `ty_pl` field, payload is `Bin`. lhs is ptr, rhs is len.
+ slice,
/// Given a slice value, return the length.
/// Result type is always usize.
/// Uses the `ty_op` field.
@@ -367,6 +386,12 @@ pub const Inst = struct {
/// Given a slice value, return the pointer.
/// Uses the `ty_op` field.
slice_ptr,
+ /// Given a pointer to a slice, return a pointer to the length of the slice.
+ /// Uses the `ty_op` field.
+ ptr_slice_len_ptr,
+ /// Given a pointer to a slice, return a pointer to the pointer of the slice.
+ /// Uses the `ty_op` field.
+ ptr_slice_ptr_ptr,
/// Given an array value and element index, return the element value at that index.
/// Result type is the element type of the array operand.
/// Uses the `bin_op` field.
@@ -375,10 +400,10 @@ pub const Inst = struct {
/// Result type is the element type of the slice operand.
/// Uses the `bin_op` field.
slice_elem_val,
- /// Given a pointer to a slice, and element index, return the element value at that index.
- /// Result type is the element type of the slice operand (2 element type operations).
- /// Uses the `bin_op` field.
- ptr_slice_elem_val,
+ /// Given a slice value and element index, return a pointer to the element value at that index.
+ /// Result type is a pointer to the element type of the slice operand.
+ /// Uses the `ty_pl` field with payload `Bin`.
+ slice_elem_ptr,
/// Given a pointer value, and element index, return the element value at that index.
/// Result type is the element type of the pointer operand.
/// Uses the `bin_op` field.
@@ -387,11 +412,6 @@ pub const Inst = struct {
/// Result type is pointer to the element type of the pointer operand.
/// Uses the `ty_pl` field with payload `Bin`.
ptr_elem_ptr,
- /// Given a pointer to a pointer, and element index, return the element value of the inner
- /// pointer at that index.
- /// Result type is the element type of the inner pointer operand.
- /// Uses the `bin_op` field.
- ptr_ptr_elem_val,
/// Given a pointer to an array, return a slice.
/// Uses the `ty_op` field.
array_to_slice,
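The new `slice` instruction pairs a pointer with a length. At the language level this is what slicing a many-item pointer by a runtime length produces; a small illustrative test (my assumption about which construct lowers to it, since the lowering itself happens in Sema, not in this hunk):

```zig
const std = @import("std");

test "building a slice from a pointer and a length" {
    var buf = [_]u8{ 1, 2, 3, 4 };
    const ptr: [*]u8 = &buf;
    const len: usize = 3;
    const s: []u8 = ptr[0..len]; // pointer + length -> slice
    try std.testing.expectEqual(@as(usize, 3), s.len);
    try std.testing.expectEqual(@as(u8, 2), s[1]);
}
```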
@@ -640,7 +660,10 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.mul,
.mulwrap,
.mul_sat,
- .div,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ .div_exact,
.rem,
.mod,
.bit_and,
@@ -685,9 +708,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.constant,
.struct_field_ptr,
.struct_field_val,
+ .slice_elem_ptr,
.ptr_elem_ptr,
.cmpxchg_weak,
.cmpxchg_strong,
+ .slice,
=> return air.getRefType(datas[inst].ty_pl.ty),
.not,
@@ -707,6 +732,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.wrap_errunion_payload,
.wrap_errunion_err,
.slice_ptr,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -760,11 +787,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
const ptr_ty = air.typeOf(datas[inst].bin_op.lhs);
return ptr_ty.elemType();
},
- .ptr_slice_elem_val, .ptr_ptr_elem_val => {
- const outer_ptr_ty = air.typeOf(datas[inst].bin_op.lhs);
- const inner_ptr_ty = outer_ptr_ty.elemType();
- return inner_ptr_ty.elemType();
- },
.atomic_load => {
const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr);
return ptr_ty.elemType();
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 44234d41f7..59643d5279 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -11,6 +11,8 @@ const StringIndexAdapter = std.hash_map.StringIndexAdapter;
const StringIndexContext = std.hash_map.StringIndexContext;
const Zir = @import("Zir.zig");
+const refToIndex = Zir.refToIndex;
+const indexToRef = Zir.indexToRef;
const trace = @import("tracy.zig").trace;
const BuiltinFn = @import("BuiltinFn.zig");
@@ -57,6 +59,7 @@ fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
Zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
+ Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
});
}
@@ -193,9 +196,6 @@ pub const ResultLoc = union(enum) {
/// The expression must generate a pointer rather than a value. For example, the left hand side
/// of an assignment uses this kind of result location.
ref,
- /// The callee will accept a ref, but it is not necessary, and the `ResultLoc`
- /// may be treated as `none` instead.
- none_or_ref,
/// The expression will be coerced into this type, but it will be evaluated as an rvalue.
ty: Zir.Inst.Ref,
/// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion,
@@ -231,7 +231,7 @@ pub const ResultLoc = union(enum) {
fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy {
switch (rl) {
// In this branch there will not be any store_to_block_ptr instructions.
- .discard, .none, .none_or_ref, .ty, .coerced_ty, .ref => return .{
+ .discard, .none, .ty, .coerced_ty, .ref => return .{
.tag = .break_operand,
.elide_store_to_block_ptr_instructions = false,
},
@@ -276,17 +276,30 @@ fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zi
return expr(gz, scope, coerced_type_rl, type_node);
}
+fn reachableTypeExpr(
+ gz: *GenZir,
+ scope: *Scope,
+ type_node: Ast.Node.Index,
+ reachable_node: Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+ const prev_force_comptime = gz.force_comptime;
+ gz.force_comptime = true;
+ defer gz.force_comptime = prev_force_comptime;
+
+ return reachableExpr(gz, scope, coerced_type_rl, type_node, reachable_node);
+}
+
/// Same as `expr` but fails with a compile error if the result type is `noreturn`.
fn reachableExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
- src_node: Ast.Node.Index,
+ reachable_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const result_inst = try expr(gz, scope, rl, node);
if (gz.refIsNoReturn(result_inst)) {
- return gz.astgen.failNodeNotes(src_node, "unreachable code", .{}, &[_]u32{
+ return gz.astgen.failNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{
try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}),
});
}
@@ -634,18 +647,24 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
return simpleBinOp(gz, scope, rl, node, .bit_and);
},
- .bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or),
- .bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),
+ .bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or),
+ .bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),
.bang_equal => return simpleBinOp(gz, scope, rl, node, .cmp_neq),
.equal_equal => return simpleBinOp(gz, scope, rl, node, .cmp_eq),
.greater_than => return simpleBinOp(gz, scope, rl, node, .cmp_gt),
.greater_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_gte),
.less_than => return simpleBinOp(gz, scope, rl, node, .cmp_lt),
.less_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_lte),
-
.array_cat => return simpleBinOp(gz, scope, rl, node, .array_cat),
- .array_mult => return simpleBinOp(gz, scope, rl, node, .array_mul),
+
+ .array_mult => {
+ const result = try gz.addPlNode(.array_mul, node, Zir.Inst.Bin{
+ .lhs = try expr(gz, scope, .none, node_datas[node].lhs),
+ .rhs = try comptimeExpr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs),
+ });
+ return rvalue(gz, rl, result, node);
+ },
.error_union => return simpleBinOp(gz, scope, rl, node, .error_union_type),
.merge_error_sets => return simpleBinOp(gz, scope, rl, node, .merge_error_sets),
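The `**` operator no longer goes through `simpleBinOp`: its right-hand side is now evaluated as a comptime expression coerced to `usize`. At the language level the repeat count must be comptime-known:

```zig
const std = @import("std");

test "array multiplication repeat count is comptime-known" {
    const n = 3; // comptime_int, coerced to usize as in the change above
    const arr = [_]u8{ 1, 2 } ** n;
    try std.testing.expectEqual(@as(usize, 6), arr.len);
    try std.testing.expectEqual(@as(u8, 1), arr[4]);
}
```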
@@ -721,62 +740,44 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
.slice_open => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
- const start = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs);
+ const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs);
const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{
.lhs = lhs,
.start = start,
});
- switch (rl) {
- .ref, .none_or_ref => return result,
- else => {
- const dereffed = try gz.addUnNode(.load, result, node);
- return rvalue(gz, rl, dereffed, node);
- },
- }
+ return rvalue(gz, rl, result, node);
},
.slice => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice);
- const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
- const end = try expr(gz, scope, .{ .ty = .usize_type }, extra.end);
+ const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start);
+ const end = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end);
const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{
.lhs = lhs,
.start = start,
.end = end,
});
- switch (rl) {
- .ref, .none_or_ref => return result,
- else => {
- const dereffed = try gz.addUnNode(.load, result, node);
- return rvalue(gz, rl, dereffed, node);
- },
- }
+ return rvalue(gz, rl, result, node);
},
.slice_sentinel => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel);
- const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
- const end = if (extra.end != 0) try expr(gz, scope, .{ .ty = .usize_type }, extra.end) else .none;
- const sentinel = try expr(gz, scope, .{ .ty = .usize_type }, extra.sentinel);
+ const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start);
+ const end = if (extra.end != 0) try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end) else .none;
+ const sentinel = try expr(gz, scope, .none, extra.sentinel);
const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{
.lhs = lhs,
.start = start,
.end = end,
.sentinel = sentinel,
});
- switch (rl) {
- .ref, .none_or_ref => return result,
- else => {
- const dereffed = try gz.addUnNode(.load, result, node);
- return rvalue(gz, rl, dereffed, node);
- },
- }
+ return rvalue(gz, rl, result, node);
},
.deref => {
const lhs = try expr(gz, scope, .none, node_datas[node].lhs);
switch (rl) {
- .ref, .none_or_ref => return lhs,
+ .ref => return lhs,
else => {
const result = try gz.addUnNode(.load, lhs, node);
return rvalue(gz, rl, result, node);
@@ -1155,16 +1156,6 @@ fn fnProtoExpr(
return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{});
}
- const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
- const is_inferred_error = token_tags[maybe_bang] == .bang;
- if (is_inferred_error) {
- return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
- }
- var ret_gz = gz.makeSubBlock(scope);
- defer ret_gz.instructions.deinit(gpa);
- const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
- const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
-
const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
try expr(
gz,
@@ -1175,6 +1166,16 @@ fn fnProtoExpr(
else
Zir.Inst.Ref.none;
+ const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
+ const is_inferred_error = token_tags[maybe_bang] == .bang;
+ if (is_inferred_error) {
+ return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
+ }
+ var ret_gz = gz.makeSubBlock(scope);
+ defer ret_gz.instructions.deinit(gpa);
+ const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
+ const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
+
const result = try gz.addFunc(.{
.src_node = fn_proto.ast.proto_node,
.param_block = 0,
@@ -1273,7 +1274,7 @@ fn arrayInitExpr(
return arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon_ref);
}
},
- .none, .none_or_ref => {
+ .none => {
if (types.array != .none) {
return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init);
} else {
@@ -1475,7 +1476,7 @@ fn structInitExpr(
return structInitExprRlNone(gz, scope, node, struct_init, .struct_init_anon_ref);
}
},
- .none, .none_or_ref => {
+ .none => {
if (struct_init.ast.type_expr != 0) {
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
@@ -1691,9 +1692,9 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
return Zir.Inst.Ref.unreachable_value;
}
block_gz.break_count += 1;
- const prev_rvalue_rl_count = block_gz.rvalue_rl_count;
const operand = try expr(parent_gz, parent_scope, block_gz.break_result_loc, rhs);
- const have_store_to_block = block_gz.rvalue_rl_count != prev_rvalue_rl_count;
+ // If the list grew as much as rvalue_rl_count did, then a break inside the operand already saved the store_to_block_ptr.
+ const have_store_to_block = block_gz.rvalue_rl_count > block_gz.labeled_store_to_block_ptr_list.items.len;
const br = try parent_gz.addBreak(.@"break", block_inst, operand);
@@ -2052,7 +2053,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.as_node,
.bit_and,
.bitcast,
- .bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
@@ -2109,7 +2109,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.negate,
.negate_wrap,
.typeof,
- .typeof_elem,
.xor,
.optional_type,
.optional_payload_safe,
@@ -2136,17 +2135,8 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.slice_sentinel,
.import,
.switch_block,
- .switch_block_multi,
- .switch_block_else,
- .switch_block_else_multi,
- .switch_block_under,
- .switch_block_under_multi,
- .switch_block_ref,
- .switch_block_ref_multi,
- .switch_block_ref_else,
- .switch_block_ref_else_multi,
- .switch_block_ref_under,
- .switch_block_ref_under_multi,
+ .switch_cond,
+ .switch_cond_ref,
.switch_capture,
.switch_capture_ref,
.switch_capture_multi,
@@ -3191,11 +3181,6 @@ fn fnDecl(
break :inst try comptimeExpr(&decl_gz, params_scope, .{ .ty = .const_slice_u8_type }, fn_proto.ast.section_expr);
};
- var ret_gz = decl_gz.makeSubBlock(params_scope);
- defer ret_gz.instructions.deinit(gpa);
- const ret_ty = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
- const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
-
const cc: Zir.Inst.Ref = blk: {
if (fn_proto.ast.callconv_expr != 0) {
if (has_inline_keyword) {
@@ -3221,6 +3206,11 @@ fn fnDecl(
}
};
+ var ret_gz = decl_gz.makeSubBlock(params_scope);
+ defer ret_gz.instructions.deinit(gpa);
+ const ret_ty = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
+ const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
+
const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
if (!is_extern) {
return astgen.failTok(fn_proto.ast.fn_token, "non-extern function has no body", .{});
@@ -4912,7 +4902,7 @@ fn tryExpr(
.ref => .ref,
else => .none,
};
- const err_ops = switch (rl) {
+ const err_ops = switch (operand_rl) {
// zig fmt: off
.ref => [3]Zir.Inst.Tag{ .is_non_err_ptr, .err_union_code_ptr, .err_union_payload_unsafe_ptr },
else => [3]Zir.Inst.Tag{ .is_non_err, .err_union_code, .err_union_payload_unsafe },
@@ -5130,11 +5120,12 @@ fn fieldAccess(
rl: ResultLoc,
node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
- if (rl == .ref) {
- return addFieldAccess(.field_ptr, gz, scope, .ref, node);
- } else {
- const access = try addFieldAccess(.field_val, gz, scope, .none_or_ref, node);
- return rvalue(gz, rl, access, node);
+ switch (rl) {
+ .ref => return addFieldAccess(.field_ptr, gz, scope, .ref, node),
+ else => {
+ const access = try addFieldAccess(.field_val, gz, scope, .none, node);
+ return rvalue(gz, rl, access, node);
+ },
}
}
@@ -5178,7 +5169,7 @@ fn arrayAccess(
),
else => return rvalue(gz, rl, try gz.addBin(
.elem_val,
- try expr(gz, scope, .none_or_ref, node_datas[node].lhs),
+ try expr(gz, scope, .none, node_datas[node].lhs),
try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
), node),
}
@@ -5743,7 +5734,8 @@ fn forExpr(
const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
const index_ptr = blk: {
- const index_ptr = try parent_gz.addUnNode(.alloc, .usize_type, node);
+ const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime else .alloc;
+ const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
// initialize to zero
_ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
break :blk index_ptr;
@@ -6031,11 +6023,12 @@ fn switchExpr(
}
const operand_rl: ResultLoc = if (any_payload_is_ref) .ref else .none;
- const operand = try expr(parent_gz, scope, operand_rl, operand_node);
+ const raw_operand = try expr(parent_gz, scope, operand_rl, operand_node);
+ const cond_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_cond_ref else .switch_cond;
+ const cond = try parent_gz.addUnNode(cond_tag, raw_operand, operand_node);
// We need the type of the operand to use as the result location for all the prong items.
- const typeof_tag: Zir.Inst.Tag = if (any_payload_is_ref) .typeof_elem else .typeof;
- const operand_ty_inst = try parent_gz.addUnNode(typeof_tag, operand, operand_node);
- const item_rl: ResultLoc = .{ .ty = operand_ty_inst };
+ const cond_ty_inst = try parent_gz.addUnNode(.typeof, cond, operand_node);
+ const item_rl: ResultLoc = .{ .ty = cond_ty_inst };
// These contain the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti.
// This is the optional else prong body.
@@ -6053,7 +6046,7 @@ fn switchExpr(
defer block_scope.instructions.deinit(gpa);
// This gets added to the parent block later, after the item expressions.
- const switch_block = try parent_gz.addBlock(undefined, switch_node);
+ const switch_block = try parent_gz.addBlock(.switch_block, switch_node);
// We re-use this same scope for all cases, including the special prong, if any.
var case_scope = parent_gz.makeSubBlock(&block_scope.base);
@@ -6206,44 +6199,32 @@ fn switchExpr(
// Now that the item expressions are generated we can add this.
try parent_gz.instructions.append(gpa, switch_block);
- const ref_bit: u4 = @boolToInt(any_payload_is_ref);
- const multi_bit: u4 = @boolToInt(multi_cases_len != 0);
- const special_prong_bits: u4 = @enumToInt(special_prong);
- comptime {
- assert(@enumToInt(Zir.SpecialProng.none) == 0b00);
- assert(@enumToInt(Zir.SpecialProng.@"else") == 0b01);
- assert(@enumToInt(Zir.SpecialProng.under) == 0b10);
- }
- const zir_tags = astgen.instructions.items(.tag);
- zir_tags[switch_block] = switch ((ref_bit << 3) | (special_prong_bits << 1) | multi_bit) {
- 0b0_00_0 => .switch_block,
- 0b0_00_1 => .switch_block_multi,
- 0b0_01_0 => .switch_block_else,
- 0b0_01_1 => .switch_block_else_multi,
- 0b0_10_0 => .switch_block_under,
- 0b0_10_1 => .switch_block_under_multi,
- 0b1_00_0 => .switch_block_ref,
- 0b1_00_1 => .switch_block_ref_multi,
- 0b1_01_0 => .switch_block_ref_else,
- 0b1_01_1 => .switch_block_ref_else_multi,
- 0b1_10_0 => .switch_block_ref_under,
- 0b1_10_1 => .switch_block_ref_under_multi,
- else => unreachable,
- };
- const payload_index = astgen.extra.items.len;
- const zir_datas = astgen.instructions.items(.data);
- zir_datas[switch_block].pl_node.payload_index = @intCast(u32, payload_index);
- // Documentation for this: `Zir.Inst.SwitchBlock` and `Zir.Inst.SwitchBlockMulti`.
- try astgen.extra.ensureUnusedCapacity(gpa, @as(usize, 2) + // operand, scalar_cases_len
+ try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).Struct.fields.len +
@boolToInt(multi_cases_len != 0) +
special_case_payload.items.len +
scalar_cases_payload.items.len +
multi_cases_payload.items.len);
- astgen.extra.appendAssumeCapacity(@enumToInt(operand));
- astgen.extra.appendAssumeCapacity(scalar_cases_len);
+
+ const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{
+ .operand = cond,
+ .bits = Zir.Inst.SwitchBlock.Bits{
+ .is_ref = any_payload_is_ref,
+ .has_multi_cases = multi_cases_len != 0,
+ .has_else = special_prong == .@"else",
+ .has_under = special_prong == .under,
+ .scalar_cases_len = @intCast(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, scalar_cases_len),
+ },
+ });
+
+ const zir_datas = astgen.instructions.items(.data);
+ const zir_tags = astgen.instructions.items(.tag);
+
+ zir_datas[switch_block].pl_node.payload_index = payload_index;
+
if (multi_cases_len != 0) {
astgen.extra.appendAssumeCapacity(multi_cases_len);
}
+
const strat = rl.strategy(&block_scope);
switch (strat.tag) {
.break_operand => {
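The twelve specialized `switch_block_*` tags collapse into a single `switch_block` whose flags live in `Zir.Inst.SwitchBlock.Bits`, serialized through the `@bitCast(u32, ...)` case added to `addExtraAssumeCapacity` near the top of this diff. The real definition is in src/Zir.zig (changed elsewhere in this commit); a plausible sketch inferred purely from the field names used above:

```zig
// Sketch only: the field order and the exact width of ScalarCasesLen are
// assumptions; the struct must total 32 bits for @bitCast(u32, bits) to work.
pub const Bits = packed struct {
    is_ref: bool,
    has_multi_cases: bool,
    has_else: bool,
    has_under: bool,
    scalar_cases_len: ScalarCasesLen,

    pub const ScalarCasesLen = u28; // 4 flag bits + 28 = 32
};
```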
@@ -6517,7 +6498,8 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
},
.always => {
// Value is always an error. Emit both error defers and regular defers.
- const err_code = try gz.addUnNode(.err_union_code, operand, node);
+ const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
+ const err_code = try gz.addUnNode(.err_union_code, result, node);
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
@@ -6532,7 +6514,8 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
}
// Emit conditional branch for generating errdefers.
- const is_non_err = try gz.addUnNode(.is_non_err, operand, node);
+ const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
+ const is_non_err = try gz.addUnNode(.is_non_err, result, node);
const condbr = try gz.addCondBr(.condbr, node);
var then_scope = gz.makeSubBlock(scope);
@@ -6545,7 +6528,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
defer else_scope.instructions.deinit(astgen.gpa);
const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{
- .both = try else_scope.addUnNode(.err_union_code, operand, node),
+ .both = try else_scope.addUnNode(.err_union_code, result, node),
};
try genDefers(&else_scope, defer_outer, scope, which_ones);
try else_scope.addRet(rl, operand, node);
@@ -6664,7 +6647,7 @@ fn identifier(
);
switch (rl) {
- .ref, .none_or_ref => return ptr_inst,
+ .ref => return ptr_inst,
else => {
const loaded = try gz.addUnNode(.load, ptr_inst, ident);
return rvalue(gz, rl, loaded, ident);
@@ -6700,7 +6683,7 @@ fn identifier(
// Decl references happen by name rather than ZIR index so that when unrelated
// decls are modified, ZIR code containing references to them can be unmodified.
switch (rl) {
- .ref, .none_or_ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
+ .ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
else => {
const result = try gz.addStrTok(.decl_val, name_str_index, ident_token);
return rvalue(gz, rl, result, ident);
@@ -7105,7 +7088,7 @@ fn as(
) InnerError!Zir.Inst.Ref {
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
- .none, .none_or_ref, .discard, .ref, .ty, .coerced_ty => {
+ .none, .discard, .ref, .ty, .coerced_ty => {
const result = try reachableExpr(gz, scope, .{ .ty = dest_type }, rhs, node);
return rvalue(gz, rl, result, node);
},
@@ -7128,7 +7111,7 @@ fn unionInit(
const union_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
switch (rl) {
- .none, .none_or_ref, .discard, .ref, .ty, .coerced_ty, .inferred_ptr => {
+ .none, .discard, .ref, .ty, .coerced_ty, .inferred_ptr => {
_ = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{
.container_type = union_type,
.field_name = field_name,
@@ -7189,42 +7172,13 @@ fn bitCast(
lhs: Ast.Node.Index,
rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- const dest_type = try typeExpr(gz, scope, lhs);
- switch (rl) {
- .none, .none_or_ref, .discard, .ty, .coerced_ty => {
- const operand = try expr(gz, scope, .none, rhs);
- const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
- .lhs = dest_type,
- .rhs = operand,
- });
- return rvalue(gz, rl, result, node);
- },
- .ref => {
- return astgen.failNode(node, "cannot take address of `@bitCast` result", .{});
- },
- .ptr, .inferred_ptr => |result_ptr| {
- return bitCastRlPtr(gz, scope, node, dest_type, result_ptr, rhs);
- },
- .block_ptr => |block| {
- return bitCastRlPtr(gz, scope, node, dest_type, block.rl_ptr, rhs);
- },
- }
-}
-
-fn bitCastRlPtr(
- gz: *GenZir,
- scope: *Scope,
- node: Ast.Node.Index,
- dest_type: Zir.Inst.Ref,
- result_ptr: Zir.Inst.Ref,
- rhs: Ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
- const casted_result_ptr = try gz.addPlNode(.bitcast_result_ptr, node, Zir.Inst.Bin{
+ const dest_type = try reachableTypeExpr(gz, scope, lhs, node);
+ const operand = try reachableExpr(gz, scope, .none, rhs, node);
+ const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
.lhs = dest_type,
- .rhs = result_ptr,
+ .rhs = operand,
});
- return expr(gz, scope, .{ .ptr = casted_result_ptr }, rhs);
+ return rvalue(gz, rl, result, node);
}
fn typeOf(
@@ -8383,7 +8337,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool
}
}
-fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never, always, maybe } {
+fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@@ -8560,10 +8514,10 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never
.unwrap_optional,
=> node = node_datas[node].lhs,
- // Forward the question to the RHS sub-expression.
+ // LHS sub-expression may still be an error under the outer optional or error union
.@"catch",
.@"orelse",
- => node = node_datas[node].rhs,
+ => return .maybe,
.block_two,
.block_two_semicolon,
@@ -8590,11 +8544,7 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never
// If the builtin is an invalid name, we don't cause an error here; instead
// let it pass, and the error will be "invalid builtin function" later.
const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe;
- if (builtin_info.tag == .err_set_cast) {
- return .always;
- } else {
- return .never;
- }
+ return builtin_info.eval_to_error;
},
}
}
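The `catch`/`orelse` change is a correctness fix: classifying the expression by its right-hand side alone misses the case where the unwrapped left-hand payload is itself an error value, so `.maybe` is the only safe answer. For example:

```zig
const std = @import("std");

test "orelse can still yield an error from its LHS payload" {
    const opt: ?anyerror = error.FromLhs;
    // The result here comes from the LHS payload, not the RHS, so looking
    // only at the RHS would misclassify this expression.
    const e = opt orelse error.Fallback;
    try std.testing.expectEqual(@as(anyerror, error.FromLhs), e);
}
```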
@@ -8799,7 +8749,7 @@ fn rvalue(
) InnerError!Zir.Inst.Ref {
if (gz.endsWithNoReturn()) return result;
switch (rl) {
- .none, .none_or_ref, .coerced_ty => return result,
+ .none, .coerced_ty => return result,
.discard => {
// Emit a compile error for discarding error values.
_ = try gz.addUnNode(.ensure_result_non_error, result, src_node);
@@ -9561,9 +9511,7 @@ const GenZir = struct {
gz.rl_ty_inst = ty_inst;
gz.break_result_loc = parent_rl;
},
- .none_or_ref => {
- gz.break_result_loc = .ref;
- },
+
.discard, .none, .ptr, .ref => {
gz.break_result_loc = parent_rl;
},
@@ -10627,21 +10575,6 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void {
astgen.source_column = column;
}
-const ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len;
-
-fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref {
- return @intToEnum(Zir.Inst.Ref, ref_start_index + inst);
-}
-
-fn refToIndex(inst: Zir.Inst.Ref) ?Zir.Inst.Index {
- const ref_int = @enumToInt(inst);
- if (ref_int >= ref_start_index) {
- return ref_int - ref_start_index;
- } else {
- return null;
- }
-}
-
fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !void {
const gpa = astgen.gpa;
const tree = astgen.tree;
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index e1f4f5bd16..7c5dde03d1 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -119,10 +119,21 @@ pub const MemLocRequirement = enum {
forward1,
};
+pub const EvalToError = enum {
+ /// The builtin cannot possibly evaluate to an error.
+ never,
+ /// The builtin will always evaluate to an error.
+ always,
+ /// The builtin may or may not evaluate to an error depending on the parameters.
+ maybe,
+};
+
tag: Tag,
/// Info about the builtin call's ability to take advantage of a result location pointer.
needs_mem_loc: MemLocRequirement = .never,
+/// Info about the builtin call's possibility of returning an error.
+eval_to_error: EvalToError = .never,
/// `true` if the builtin call can be the left-hand side of an expression (assigned to).
allows_lvalue: bool = false,
/// The number of parameters to this builtin function. `null` means variable number
@@ -158,6 +169,7 @@ pub const list = list: {
.{
.tag = .as,
.needs_mem_loc = .forward1,
+ .eval_to_error = .maybe,
.param_count = 2,
},
},
@@ -258,6 +270,7 @@ pub const list = list: {
.{
.tag = .call,
.needs_mem_loc = .always,
+ .eval_to_error = .maybe,
.param_count = 3,
},
},
@@ -391,6 +404,7 @@ pub const list = list: {
"@errSetCast",
.{
.tag = .err_set_cast,
+ .eval_to_error = .always,
.param_count = 2,
},
},
@@ -420,6 +434,7 @@ pub const list = list: {
.{
.tag = .field,
.needs_mem_loc = .always,
+ .eval_to_error = .maybe,
.param_count = 2,
.allows_lvalue = true,
},
@@ -512,6 +527,7 @@ pub const list = list: {
"@intToError",
.{
.tag = .int_to_error,
+ .eval_to_error = .always,
.param_count = 1,
},
},
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 3432c38ab5..50d9376c58 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -55,6 +55,10 @@ c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
/// since the last compilation, as well as scan for `@import` and queue up
/// additional jobs corresponding to those new files.
astgen_work_queue: std.fifo.LinearFifo(*Module.File, .Dynamic),
+/// These jobs inspect the file system via stat() and, if the embedded file has changed
+/// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl`
+/// task for it.
+embed_file_work_queue: std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic),
/// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
/// This data is accessed by multiple threads and is protected by `mutex`.
@@ -181,6 +185,10 @@ const Job = union(enum) {
/// It may already have been analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again.
analyze_decl: *Module.Decl,
+ /// The file that was loaded with `@embedFile` has changed on disk
+ /// and has been re-loaded into memory. All Decls that depend on it
+ /// need to be re-analyzed.
+ update_embed_file: *Module.EmbedFile,
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: *Module.Decl,
@@ -750,6 +758,8 @@ pub const InitOptions = struct {
subsystem: ?std.Target.SubSystem = null,
/// WASI-only. Type of WASI execution model ("command" or "reactor").
wasi_exec_model: ?std.builtin.WasiExecModel = null,
+ /// (Zig compiler development) Enable dumping linker's state as JSON.
+ enable_link_snapshots: bool = false,
};
fn addPackageTableToCacheHash(
@@ -1434,6 +1444,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.is_test = options.is_test,
.wasi_exec_model = wasi_exec_model,
.use_stage1 = use_stage1,
+ .enable_link_snapshots = options.enable_link_snapshots,
});
errdefer bin_file.destroy();
comp.* = .{
@@ -1451,6 +1462,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa),
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
.astgen_work_queue = std.fifo.LinearFifo(*Module.File, .Dynamic).init(gpa),
+ .embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa),
.keep_source_files_loaded = options.keep_source_files_loaded,
.use_clang = use_clang,
.clang_argv = options.clang_argv,
@@ -1636,6 +1648,7 @@ pub fn destroy(self: *Compilation) void {
self.work_queue.deinit();
self.c_object_work_queue.deinit();
self.astgen_work_queue.deinit();
+ self.embed_file_work_queue.deinit();
{
var it = self.crt_files.iterator();
@@ -1751,6 +1764,16 @@ pub fn update(self: *Compilation) !void {
}
if (!use_stage1) {
+ // Queue a work item to check whether any files used with `@embedFile` changed.
+ {
+ try self.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count());
+ var it = module.embed_table.iterator();
+ while (it.next()) |entry| {
+ const embed_file = entry.value_ptr.*;
+ self.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
+ }
+ }
+
try self.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
if (self.bin_file.options.is_test) {
try self.work_queue.writeItem(.{ .analyze_pkg = module.main_pkg });
@@ -1874,6 +1897,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
if (self.bin_file.options.module) |module| {
total += module.failed_exports.count();
+ total += module.failed_embed_files.count();
{
var it = module.failed_files.iterator();
@@ -1971,6 +1995,13 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
}
}
{
+ var it = module.failed_embed_files.iterator();
+ while (it.next()) |entry| {
+ const msg = entry.value_ptr.*;
+ try AllErrors.add(module, &arena, &errors, msg.*);
+ }
+ }
+ {
var it = module.failed_decls.iterator();
while (it.next()) |entry| {
// Skip errors for Decls within files that had a parse failure.
@@ -2069,6 +2100,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
var c_obj_prog_node = main_progress_node.start("Compile C Objects", self.c_source_files.len);
defer c_obj_prog_node.end();
+ var embed_file_prog_node = main_progress_node.start("Detect @embedFile updates", self.embed_file_work_queue.count);
+ defer embed_file_prog_node.end();
+
self.work_queue_wait_group.reset();
defer self.work_queue_wait_group.wait();
@@ -2083,6 +2117,13 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
});
}
+ while (self.embed_file_work_queue.readItem()) |embed_file| {
+ self.astgen_wait_group.start();
+ try self.thread_pool.spawn(workerCheckEmbedFile, .{
+ self, embed_file, &embed_file_prog_node, &self.astgen_wait_group,
+ });
+ }
+
while (self.c_object_work_queue.readItem()) |c_object| {
self.work_queue_wait_group.start();
try self.thread_pool.spawn(workerUpdateCObject, .{
@@ -2264,6 +2305,15 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
error.AnalysisFail => continue,
};
},
+ .update_embed_file => |embed_file| {
+ if (build_options.omit_stage2)
+ @panic("sadly stage2 is omitted from this build to save memory on the CI server");
+ const module = self.bin_file.options.module.?;
+ module.updateEmbedFile(embed_file) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => continue,
+ };
+ },
.update_line_number => |decl| {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
@@ -2546,6 +2596,29 @@ fn workerAstGenFile(
}
}
+fn workerCheckEmbedFile(
+ comp: *Compilation,
+ embed_file: *Module.EmbedFile,
+ prog_node: *std.Progress.Node,
+ wg: *WaitGroup,
+) void {
+ defer wg.finish();
+
+ var child_prog_node = prog_node.start(embed_file.sub_file_path, 0);
+ child_prog_node.activate();
+ defer child_prog_node.end();
+
+ const mod = comp.bin_file.options.module.?;
+ mod.detectEmbedFileUpdate(embed_file) catch |err| {
+ comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) {
+ // Swallowing this error is OK because it's implied to be OOM when
+ // there is a missing `failed_embed_files` error message.
+ error.OutOfMemory => {},
+ };
+ return;
+ };
+}
+
pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest {
var man = comp.cache_parent.obtain();
@@ -2794,6 +2867,36 @@ fn reportRetryableAstGenError(
}
}
+fn reportRetryableEmbedFileError(
+ comp: *Compilation,
+ embed_file: *Module.EmbedFile,
+ err: anyerror,
+) error{OutOfMemory}!void {
+ const mod = comp.bin_file.options.module.?;
+ const gpa = mod.gpa;
+
+ const src_loc: Module.SrcLoc = embed_file.owner_decl.srcLoc();
+
+ const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path|
+ try Module.ErrorMsg.create(
+ gpa,
+ src_loc,
+ "unable to load '{s}" ++ std.fs.path.sep_str ++ "{s}': {s}",
+ .{ dir_path, embed_file.sub_file_path, @errorName(err) },
+ )
+ else
+ try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{s}': {s}", .{
+ embed_file.sub_file_path, @errorName(err),
+ });
+ errdefer err_msg.destroy(gpa);
+
+ {
+ const lock = comp.mutex.acquire();
+ defer lock.release();
+ try mod.failed_embed_files.putNoClobber(gpa, embed_file, err_msg);
+ }
+}
+
fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{});
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 6f7c938f4c..499500fddb 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -233,7 +233,10 @@ fn analyzeInst(
.mul,
.mulwrap,
.mul_sat,
- .div,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ .div_exact,
.rem,
.mod,
.ptr_add,
@@ -252,9 +255,7 @@ fn analyzeInst(
.store,
.array_elem_val,
.slice_elem_val,
- .ptr_slice_elem_val,
.ptr_elem_val,
- .ptr_ptr_elem_val,
.shl,
.shl_exact,
.shl_sat,
@@ -300,6 +301,8 @@ fn analyzeInst(
.wrap_errunion_err,
.slice_ptr,
.slice_len,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -359,7 +362,7 @@ fn analyzeInst(
const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_operand, .none, .none });
},
- .ptr_elem_ptr => {
+ .ptr_elem_ptr, .slice_elem_ptr, .slice => {
const extra = a.air.extraData(Air.Bin, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},
diff --git a/src/Module.zig b/src/Module.zig
index a42ec3c2e1..de6770d3d7 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -55,11 +55,17 @@ decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// is performing the export of another Decl.
/// This table owns the Export memory.
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
-/// The set of all the files in the Module. We keep track of this in order to iterate
-/// over it and check which source files have been modified on the file system when
+/// The set of all the Zig source files in the Module. We keep track of this in order
+/// to iterate over it and check which source files have been modified on the file system when
/// an update is requested, as well as to cache `@import` results.
/// Keys are fully resolved file paths. This table owns the keys and values.
import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
+/// The set of all the files which have been loaded with `@embedFile` in the Module.
+/// We keep track of this in order to iterate over it and check which files have been
+/// modified on the file system when an update is requested, as well as to cache
+/// `@embedFile` results.
+/// Keys are fully resolved file paths. This table owns the keys and values.
+embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
@@ -87,6 +93,8 @@ compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, i32) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{},
+/// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator.
+failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
@@ -309,6 +317,7 @@ pub const WipCaptureScope = struct {
assert(!self.finalized);
// use a temp to avoid unintentional aliasing due to RLS
const tmp = try self.scope.captures.clone(self.perm_arena);
+ self.scope.captures.deinit(self.gpa);
self.scope.captures = tmp;
self.finalized = true;
}
@@ -763,6 +772,17 @@ pub const Decl = struct {
else => false,
};
}
+
+ pub fn getAlignment(decl: Decl, target: Target) u32 {
+ assert(decl.has_tv);
+ if (decl.align_val.tag() != .null_value) {
+ // Explicit alignment.
+ return @intCast(u32, decl.align_val.toUnsignedInt());
+ } else {
+ // Natural alignment.
+ return decl.ty.abiAlignment(target);
+ }
+ }
};
/// This state is attached to every Decl when Module emit_h is non-null.
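`getAlignment` takes the explicit `align(...)` value when `align_val` is non-null and otherwise falls back to the type's ABI alignment. The same two cases as they appear at the language level:

```zig
const std = @import("std");

test "explicit align overrides natural ABI alignment" {
    var natural: u32 = 0; // no align attribute: ABI alignment of u32
    var explicit: u32 align(16) = 0; // explicit: align_val is non-null
    natural += 1;
    explicit += 1;
    try std.testing.expect(@TypeOf(&natural) == *u32);
    try std.testing.expect(@TypeOf(&explicit) == *align(16) u32);
}
```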
@@ -782,6 +802,10 @@ pub const ErrorSet = struct {
/// The length is given by `names_len`.
names_ptr: [*]const []const u8,
+ pub fn names(self: ErrorSet) []const []const u8 {
+ return self.names_ptr[0..self.names_len];
+ }
+
pub fn srcLoc(self: ErrorSet) SrcLoc {
return .{
.file_scope = self.owner_decl.getFileScope(),
@@ -864,10 +888,12 @@ pub const EnumSimple = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: *Decl,
/// Set of field names in declaration order.
- fields: std.StringArrayHashMapUnmanaged(void),
+ fields: NameMap,
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
+ pub const NameMap = EnumFull.NameMap;
+
pub fn srcLoc(self: EnumSimple) SrcLoc {
return .{
.file_scope = self.owner_decl.getFileScope(),
@@ -1530,6 +1556,23 @@ pub const File = struct {
}
};
+/// Represents the contents of a file loaded with `@embedFile`.
+pub const EmbedFile = struct {
+ /// Relative to the owning package's root_src_dir.
+ /// Memory is stored in gpa, owned by EmbedFile.
+ sub_file_path: []const u8,
+ bytes: [:0]const u8,
+ stat_size: u64,
+ stat_inode: std.fs.File.INode,
+ stat_mtime: i128,
+ /// Package that this file is a part of, managed externally.
+ pkg: *Package,
+ /// The Decl that was created from the `@embedFile` to own this resource.
+ /// This is how zig knows what other Decl objects to invalidate if the file
+ /// changes on disk.
+ owner_decl: *Decl,
+};
+
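`EmbedFile` is the in-memory backing for the `@embedFile` builtin; `bytes` being `[:0]const u8` matches the NUL-terminated `readToEndAllocOptions` call in `embedFile` below. A hypothetical usage site (the file name is invented):

```zig
const std = @import("std");

// "logo.txt" must exist next to this source file at build time. Each
// distinct file reached via @embedFile gets one EmbedFile entry, and a
// change on disk re-triggers analysis of the Decls that depend on it.
const logo: [:0]const u8 = @embedFile("logo.txt");

test "embedded data is comptime-known" {
    comptime std.debug.assert(logo.len != 0); // assumes a non-empty file
}
```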
/// This struct holds data necessary to construct API-facing `AllErrors.Message`.
/// Its memory is managed with the general purpose allocator so that they
/// can be created and destroyed in response to incremental updates.
@@ -2360,6 +2403,11 @@ pub fn deinit(mod: *Module) void {
}
mod.failed_files.deinit(gpa);
+ for (mod.failed_embed_files.values()) |msg| {
+ msg.destroy(gpa);
+ }
+ mod.failed_embed_files.deinit(gpa);
+
for (mod.failed_exports.values()) |value| {
value.destroy(gpa);
}
@@ -3056,6 +3104,32 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
}
}
+pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ // TODO we can potentially relax this if we store some more information along
+ // with decl dependency edges
+ for (embed_file.owner_decl.dependants.keys()) |dep| {
+ switch (dep.analysis) {
+ .unreferenced => unreachable,
+ .in_progress => continue, // already doing analysis, ok
+ .outdated => continue, // already queued for update
+
+ .file_failure,
+ .dependency_failure,
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .codegen_failure_retryable,
+ .complete,
+ => if (dep.generation != mod.generation) {
+ try mod.markOutdatedDecl(dep);
+ },
+ }
+ }
+}
+
pub fn semaPkg(mod: *Module, pkg: *Package) !void {
const file = (try mod.importPkg(pkg)).file;
return mod.semaFile(file);
@@ -3547,6 +3621,84 @@ pub fn importFile(
};
}
+pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*EmbedFile {
+ const gpa = mod.gpa;
+
+ // The resolved path is used as the key in the table, to detect when
+ // two different relative paths refer to the same file.
+ const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
+ const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
+ cur_pkg_dir_path, cur_file.sub_file_path, "..", rel_file_path,
+ });
+ var keep_resolved_path = false;
+ defer if (!keep_resolved_path) gpa.free(resolved_path);
+
+ const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
+ if (gop.found_existing) return gop.value_ptr.*;
+ keep_resolved_path = true; // It's now owned by embed_table.
+
+ const new_file = try gpa.create(EmbedFile);
+ errdefer gpa.destroy(new_file);
+
+ const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path});
+ defer gpa.free(resolved_root_path);
+
+ if (!mem.startsWith(u8, resolved_path, resolved_root_path)) {
+ return error.ImportOutsidePkgPath;
+ }
+ // +1 for the directory separator here.
+ const sub_file_path = try gpa.dupe(u8, resolved_path[resolved_root_path.len + 1 ..]);
+ errdefer gpa.free(sub_file_path);
+
+ var file = try cur_file.pkg.root_src_directory.handle.openFile(sub_file_path, .{});
+ defer file.close();
+
+ const stat = try file.stat();
+ const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), stat.size, 1, 0);
+
+ log.debug("new embedFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, rel_file_path={s}", .{
+ resolved_root_path, resolved_path, sub_file_path, rel_file_path,
+ });
+
+ gop.value_ptr.* = new_file;
+ new_file.* = .{
+ .sub_file_path = sub_file_path,
+ .bytes = bytes,
+ .stat_size = stat.size,
+ .stat_inode = stat.inode,
+ .stat_mtime = stat.mtime,
+ .pkg = cur_file.pkg,
+ .owner_decl = undefined, // Set by Sema immediately after this function returns.
+ };
+ return new_file;
+}
+
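Because the table key is the fully resolved path, two different relative spellings of one file share a single `EmbedFile`. `std.fs.path.resolve` (the same routine used above) strips `.` and `..` components; a small test of that property:

```zig
const std = @import("std");

test "different relative spellings resolve to the same key" {
    const gpa = std.testing.allocator;
    const a = try std.fs.path.resolve(gpa, &[_][]const u8{
        "pkg", "src/main.zig", "..", "data/blob.bin",
    });
    defer gpa.free(a);
    const b = try std.fs.path.resolve(gpa, &[_][]const u8{
        "pkg", "src/sub/../main.zig", "..", "data/blob.bin",
    });
    defer gpa.free(b);
    // Both normalize to pkg/src/data/blob.bin, so a second @embedFile of
    // the same file would hit the existing embed_table entry.
    try std.testing.expectEqualStrings(a, b);
}
```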
+pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
+ var file = try embed_file.pkg.root_src_directory.handle.openFile(embed_file.sub_file_path, .{});
+ defer file.close();
+
+ const stat = try file.stat();
+
+ const unchanged_metadata =
+ stat.size == embed_file.stat_size and
+ stat.mtime == embed_file.stat_mtime and
+ stat.inode == embed_file.stat_inode;
+
+ if (unchanged_metadata) return;
+
+ const gpa = mod.gpa;
+ const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), stat.size, 1, 0);
+ gpa.free(embed_file.bytes);
+ embed_file.bytes = bytes;
+ embed_file.stat_size = stat.size;
+ embed_file.stat_mtime = stat.mtime;
+ embed_file.stat_inode = stat.inode;
+
+ const lock = mod.comp.mutex.acquire();
+ defer lock.release();
+ try mod.comp.work_queue.writeItem(.{ .update_embed_file = embed_file });
+}
+
pub fn scanNamespace(
mod: *Module,
namespace: *Namespace,
@@ -4079,7 +4231,13 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
decl.analysis = .outdated;
}
-pub fn allocateNewDecl(mod: *Module, name: [:0]const u8, namespace: *Namespace, src_node: Ast.Node.Index, src_scope: ?*CaptureScope) !*Decl {
+pub fn allocateNewDecl(
+ mod: *Module,
+ name: [:0]const u8,
+ namespace: *Namespace,
+ src_node: Ast.Node.Index,
+ src_scope: ?*CaptureScope,
+) !*Decl {
// If we have emit-h then we must allocate a bigger structure to store the emit-h state.
const new_decl: *Decl = if (mod.emit_h != null) blk: {
const parent_struct = try mod.gpa.create(DeclPlusEmitH);
@@ -4101,7 +4259,7 @@ pub fn allocateNewDecl(mod: *Module, name: [:0]const u8, namespace: *Namespace,
.val = undefined,
.align_val = undefined,
.linksection_val = undefined,
- .@"addrspace" = undefined,
+ .@"addrspace" = .generic,
.analysis = .unreferenced,
.deletion_flag = false,
.zir_decl_index = 0,
@@ -4205,7 +4363,6 @@ pub fn createAnonymousDeclFromDeclNamed(
new_decl.val = typed_value.val;
new_decl.align_val = Value.initTag(.null_value);
new_decl.linksection_val = Value.initTag(.null_value);
- new_decl.@"addrspace" = .generic; // default global addrspace
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
diff --git a/src/Sema.zig b/src/Sema.zig
index 229ae054b2..e7cb40420e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -224,6 +224,16 @@ pub const Block = struct {
});
}
+ pub fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .bitcast,
+ .data = .{ .ty_op = .{
+ .ty = try block.sema.addType(ty),
+ .operand = operand,
+ } },
+ });
+ }
+
pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
@@ -300,7 +310,7 @@ pub const Block = struct {
.ty = ty,
.payload = try block.sema.addExtra(Air.StructField{
.struct_operand = struct_ptr,
- .field_index = @intCast(u32, field_index),
+ .field_index = field_index,
}),
} },
});
@@ -315,6 +325,60 @@ pub const Block = struct {
});
}
+ pub fn addStructFieldVal(
+ block: *Block,
+ struct_val: Air.Inst.Ref,
+ field_index: u32,
+ field_ty: Type,
+ ) !Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .struct_field_val,
+ .data = .{ .ty_pl = .{
+ .ty = try block.sema.addType(field_ty),
+ .payload = try block.sema.addExtra(Air.StructField{
+ .struct_operand = struct_val,
+ .field_index = field_index,
+ }),
+ } },
+ });
+ }
+
+ pub fn addSliceElemPtr(
+ block: *Block,
+ slice: Air.Inst.Ref,
+ elem_index: Air.Inst.Ref,
+ elem_ptr_ty: Type,
+ ) !Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .slice_elem_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = try block.sema.addType(elem_ptr_ty),
+ .payload = try block.sema.addExtra(Air.Bin{
+ .lhs = slice,
+ .rhs = elem_index,
+ }),
+ } },
+ });
+ }
+
+ pub fn addPtrElemPtr(
+ block: *Block,
+ array_ptr: Air.Inst.Ref,
+ elem_index: Air.Inst.Ref,
+ elem_ptr_ty: Type,
+ ) !Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .ptr_elem_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = try block.sema.addType(elem_ptr_ty),
+ .payload = try block.sema.addExtra(Air.Bin{
+ .lhs = array_ptr,
+ .rhs = elem_index,
+ }),
+ } },
+ });
+ }
+
pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
return Air.indexToRef(try block.addInstAsIndex(inst));
}
@@ -332,6 +396,14 @@ pub const Block = struct {
return result_index;
}
+ fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void {
+ if (safety_check and block.wantSafety()) {
+ _ = try block.sema.safetyPanic(block, src, .unreach);
+ } else {
+ _ = try block.addNoOp(.unreach);
+ }
+ }
+
pub fn startAnonDecl(block: *Block) !WipAnonDecl {
return WipAnonDecl{
.block = block,
@@ -459,7 +531,6 @@ pub fn analyzeBody(
.bit_not => try sema.zirBitNot(block, inst),
.bit_or => try sema.zirBitwise(block, inst, .bit_or),
.bitcast => try sema.zirBitcast(block, inst),
- .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst),
.suspend_block => try sema.zirSuspendBlock(block, inst),
.bool_not => try sema.zirBoolNot(block, inst),
.bool_br_and => try sema.zirBoolBr(block, inst, false),
@@ -532,18 +603,9 @@ pub fn analyzeBody(
.slice_sentinel => try sema.zirSliceSentinel(block, inst),
.slice_start => try sema.zirSliceStart(block, inst),
.str => try sema.zirStr(block, inst),
- .switch_block => try sema.zirSwitchBlock(block, inst, false, .none),
- .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none),
- .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"),
- .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"),
- .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under),
- .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under),
- .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none),
- .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none),
- .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"),
- .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"),
- .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under),
- .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under),
+ .switch_block => try sema.zirSwitchBlock(block, inst),
+ .switch_cond => try sema.zirSwitchCond(block, inst, false),
+ .switch_cond_ref => try sema.zirSwitchCond(block, inst, true),
.switch_capture => try sema.zirSwitchCapture(block, inst, false, false),
.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true),
.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false),
@@ -554,7 +616,6 @@ pub fn analyzeBody(
.size_of => try sema.zirSizeOf(block, inst),
.bit_size_of => try sema.zirBitSizeOf(block, inst),
.typeof => try sema.zirTypeof(block, inst),
- .typeof_elem => try sema.zirTypeofElem(block, inst),
.log2_int_type => try sema.zirLog2IntType(block, inst),
.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
.xor => try sema.zirBitwise(block, inst, .xor),
@@ -596,9 +657,6 @@ pub fn analyzeBody(
.pop_count => try sema.zirPopCount(block, inst),
.byte_swap => try sema.zirByteSwap(block, inst),
.bit_reverse => try sema.zirBitReverse(block, inst),
- .div_exact => try sema.zirDivExact(block, inst),
- .div_floor => try sema.zirDivFloor(block, inst),
- .div_trunc => try sema.zirDivTrunc(block, inst),
.shr_exact => try sema.zirShrExact(block, inst),
.bit_offset_of => try sema.zirBitOffsetOf(block, inst),
.offset_of => try sema.zirOffsetOf(block, inst),
@@ -638,19 +696,22 @@ pub fn analyzeBody(
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
- .add => try sema.zirArithmetic(block, inst, .add),
- .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
- .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
- .div => try sema.zirArithmetic(block, inst, .div),
- .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
- .mod => try sema.zirArithmetic(block, inst, .mod),
- .rem => try sema.zirArithmetic(block, inst, .rem),
- .mul => try sema.zirArithmetic(block, inst, .mul),
- .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
- .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
- .sub => try sema.zirArithmetic(block, inst, .sub),
- .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
- .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
+ .add => try sema.zirArithmetic(block, inst, .add),
+ .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
+ .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
+ .div => try sema.zirArithmetic(block, inst, .div),
+ .div_exact => try sema.zirArithmetic(block, inst, .div_exact),
+ .div_floor => try sema.zirArithmetic(block, inst, .div_floor),
+ .div_trunc => try sema.zirArithmetic(block, inst, .div_trunc),
+ .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
+ .mod => try sema.zirArithmetic(block, inst, .mod),
+ .rem => try sema.zirArithmetic(block, inst, .rem),
+ .mul => try sema.zirArithmetic(block, inst, .mul),
+ .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
+ .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
+ .sub => try sema.zirArithmetic(block, inst, .sub),
+ .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
+ .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
.maximum => try sema.zirMinMax(block, inst, .max),
.minimum => try sema.zirMinMax(block, inst, .min),
@@ -1179,6 +1240,22 @@ fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: T
return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
}
+fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError {
+ return sema.fail(block, src, "expected optional type, found {}", .{optional_ty});
+}
+
+fn failWithErrorSetCodeMissing(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ dest_err_set_ty: Type,
+ src_err_set_ty: Type,
+) CompileError {
+ return sema.fail(block, src, "expected type '{}', found type '{}'", .{
+ dest_err_set_ty, src_err_set_ty,
+ });
+}
+
/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
fn errNote(
@@ -1328,12 +1405,6 @@ pub fn resolveInstValue(
};
}
-fn zirBitcastResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO implement zir_sema.zirBitcastResultPtr", .{});
-}
-
fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -1359,7 +1430,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
// for the inferred allocation.
// This instruction will not make it to codegen; it is only to participate
// in the `stored_inst_list` of the `inferred_alloc`.
- const operand = try block.addTyOp(.bitcast, pointee_ty, .void_value);
+ const operand = try block.addBitCast(pointee_ty, .void_value);
try inferred_alloc.stored_inst_list.append(sema.arena, operand);
},
.inferred_alloc_comptime => {
@@ -1386,7 +1457,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
}
}
try sema.requireRuntimeBlock(block, src);
- const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
return bitcasted_ptr;
}
@@ -1880,7 +1951,7 @@ fn zirRetPtr(
try sema.requireFunctionBlock(block, src);
if (block.is_comptime) {
- return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty);
+ return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, 0);
}
const ptr_type = try Type.ptr(sema.arena, .{
@@ -1956,44 +2027,38 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const array = sema.resolveInst(inst_data.operand);
- const array_ty = sema.typeOf(array);
+ const object = sema.resolveInst(inst_data.operand);
+ const object_ty = sema.typeOf(object);
- if (array_ty.isSlice()) {
- return sema.analyzeSliceLen(block, src, array);
- }
+ const is_pointer_to = object_ty.isSinglePointer();
- if (array_ty.isSinglePointer()) {
- const elem_ty = array_ty.elemType();
- if (elem_ty.isSlice()) {
- const slice_inst = try sema.analyzeLoad(block, src, array, src);
- return sema.analyzeSliceLen(block, src, slice_inst);
- }
- if (!elem_ty.isIndexable()) {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "type '{}' does not support indexing",
- .{elem_ty},
- );
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(
- block,
- src,
- msg,
- "for loop operand must be an array, slice, tuple, or vector",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- }
- const result_ptr = try sema.fieldPtr(block, src, array, "len", src);
- return sema.analyzeLoad(block, src, result_ptr, src);
+ const array_ty = if (is_pointer_to)
+ object_ty.childType()
+ else
+ object_ty;
+
+ if (!array_ty.isIndexable()) {
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ src,
+ "type '{}' does not support indexing",
+ .{array_ty},
+ );
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(
+ block,
+ src,
+ msg,
+ "for loop operand must be an array, slice, tuple, or vector",
+ .{},
+ );
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
- return sema.fail(block, src, "TODO implement Sema.zirIndexablePtrLen", .{});
+ return sema.fieldVal(block, src, object, "len", src);
}
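// Editor's note: a minimal sketch (not part of this change) of the user-facing
// behavior implemented above: `len` is now resolved via `fieldVal` for any
// indexable for-loop operand, including a single pointer to an array.
test "len of an indexable through a single pointer" {
    var arr = [_]i32{ 1, 2, 3 };
    const ptr = &arr; // *[3]i32
    try @import("std").testing.expectEqual(@as(usize, 3), ptr.len);
}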
fn zirAllocExtended(
@@ -2013,9 +2078,7 @@ fn zirAllocExtended(
const type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
break :blk try sema.resolveType(block, ty_src, type_ref);
- } else {
- return sema.fail(block, src, "TODO implement Sema.zirAllocExtended inferred", .{});
- };
+ } else undefined;
const alignment: u16 = if (small.has_align) blk: {
const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -2024,22 +2087,47 @@ fn zirAllocExtended(
break :blk alignment;
} else 0;
+ const inferred_alloc_ty = if (small.is_const)
+ Type.initTag(.inferred_alloc_const)
+ else
+ Type.initTag(.inferred_alloc_mut);
+
if (small.is_comptime) {
- return sema.fail(block, src, "TODO implement Sema.zirAllocExtended comptime", .{});
+ if (small.has_type) {
+ return sema.analyzeComptimeAlloc(block, var_ty, alignment);
+ } else {
+ return sema.addConstant(
+ inferred_alloc_ty,
+ try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
+ );
+ }
}
- if (!small.is_const) {
- return sema.fail(block, src, "TODO implement Sema.zirAllocExtended var", .{});
+ if (small.has_type) {
+ if (!small.is_const) {
+ try sema.validateVarType(block, ty_src, var_ty, false);
+ }
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = var_ty,
+ .@"align" = alignment,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ try sema.requireRuntimeBlock(block, src);
+ try sema.resolveTypeLayout(block, src, var_ty);
+ return block.addTy(.alloc, ptr_type);
}
- const ptr_type = try Type.ptr(sema.arena, .{
- .pointee_type = var_ty,
- .@"align" = alignment,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- try sema.requireRuntimeBlock(block, src);
- try sema.resolveTypeLayout(block, src, var_ty);
- return block.addTy(.alloc, ptr_type);
+ // `Sema.addConstant` does not add the instruction to the block because it is
+ // not needed in the case of constant values. However here, we plan to "downgrade"
+ // to a normal instruction when we hit `resolve_inferred_alloc`. So we append
+ // to the block even though it is currently a `.constant`.
+ const result = try sema.addConstant(
+ inferred_alloc_ty,
+ try Value.Tag.inferred_alloc.create(sema.arena, .{}),
+ );
+ try sema.requireFunctionBlock(block, src);
+ try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
+ return result;
}
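// Editor's note: an illustrative sketch (not part of this change). The
// `inferred_alloc` path above roughly corresponds to a local whose type is
// inferred from its initializer; the `.constant` is appended so it can be
// downgraded to a runtime alloc at `resolve_inferred_alloc` if needed.
test "local with inferred type" {
    var x = @as(u32, 123); // type inferred from the initializer
    x += 1;
    try @import("std").testing.expectEqual(@as(u32, 124), x);
}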
fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -2049,7 +2137,7 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
- return sema.analyzeComptimeAlloc(block, var_ty);
+ return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
fn zirAllocInferredComptime(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -2071,7 +2159,7 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const var_decl_src = inst_data.src();
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
- return sema.analyzeComptimeAlloc(block, var_ty);
+ return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
const ptr_type = try Type.ptr(sema.arena, .{
.pointee_type = var_ty,
@@ -2091,7 +2179,7 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
- return sema.analyzeComptimeAlloc(block, var_ty);
+ return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
try sema.validateVarType(block, ty_src, var_ty, false);
const ptr_type = try Type.ptr(sema.arena, .{
@@ -2256,11 +2344,21 @@ fn validateUnionInit(
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
const field_index = @intCast(u32, field_index_big);
- // TODO here we need to go back and see if we need to convert the union
- // to a comptime-known value. This will involve editing the AIR code we have
- // generated so far - in particular deleting some runtime pointer bitcast
- // instructions which are not actually needed if the initialization expression
- // ends up being comptime-known.
+ // Handle the possibility of the union value being comptime-known.
+ const union_ptr_inst = Air.refToIndex(sema.resolveInst(field_ptr_extra.lhs)).?;
+ switch (sema.air_instructions.items(.tag)[union_ptr_inst]) {
+ .constant => return, // In this case the tag has already been set. No validation to do.
+ .bitcast => {
+ // TODO here we need to go back and see if we need to convert the union
+ // to a comptime-known value. In such case, we must delete all the instructions
+ // added to the current block starting with the bitcast.
+ // If the bitcast result ptr is an alloc, the alloc should be replaced with
+ // a constant decl_ref.
+ // Otherwise, the bitcast should be preserved and a store instruction should be
+ // emitted to store the constant union value through the bitcast.
+ },
+ else => unreachable,
+ }
// Otherwise, we set the new union tag now.
const new_tag = try sema.addConstant(
@@ -2292,7 +2390,7 @@ fn validateStructInit(
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
const field_index = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
if (found_fields[field_index] != 0) {
const other_field_ptr = found_fields[field_index];
const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node;
@@ -2354,7 +2452,32 @@ fn zirValidateArrayInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
}
-fn failWithBadFieldAccess(
+fn failWithBadMemberAccess(
+ sema: *Sema,
+ block: *Block,
+ agg_ty: Type,
+ field_src: LazySrcLoc,
+ field_name: []const u8,
+) CompileError {
+ const kw_name = switch (agg_ty.zigTypeTag()) {
+ .Union => "union",
+ .Struct => "struct",
+ .Opaque => "opaque",
+ .Enum => "enum",
+ else => unreachable,
+ };
+ const msg = msg: {
+ const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{
+ kw_name, agg_ty, field_name,
+ });
+ errdefer msg.destroy(sema.gpa);
+ try sema.addDeclaredHereNote(msg, agg_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+}
+
+fn failWithBadStructFieldAccess(
sema: *Sema,
block: *Block,
struct_obj: *Module.Struct,
@@ -2440,7 +2563,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
// if expressions should force it when the condition is compile-time known.
const src: LazySrcLoc = .unneeded;
try sema.requireRuntimeBlock(block, src);
- const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, value);
}
@@ -2486,7 +2609,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
.pointee_type = operand_ty,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
});
- const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, operand);
}
unreachable;
@@ -3985,18 +4108,20 @@ fn analyzeCall(
zir_tags,
);
} else res: {
+ try sema.requireRuntimeBlock(block, call_src);
+
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
for (uncasted_args) |uncasted_arg, i| {
+ const arg_src = call_src; // TODO: better source location
if (i < fn_params_len) {
const param_ty = func_ty.fnParamType(i);
- const arg_src = call_src; // TODO: better source location
+ try sema.resolveTypeLayout(block, arg_src, param_ty);
args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
} else {
args[i] = uncasted_arg;
}
}
- try sema.requireRuntimeBlock(block, call_src);
try sema.resolveTypeLayout(block, call_src, func_ty_info.return_type);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
@@ -4067,6 +4192,7 @@ fn finishGenericCall(
const param_ty = new_fn_ty.fnParamType(runtime_i);
const arg_src = call_src; // TODO: better source location
const uncasted_arg = uncasted_args[total_i];
+ try sema.resolveTypeLayout(block, arg_src, param_ty);
const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
runtime_args[runtime_i] = casted_arg;
runtime_i += 1;
@@ -4241,7 +4367,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.bitcast, result_ty, op_coerced);
+ return block.addBitCast(result_ty, op_coerced);
}
fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4271,7 +4397,13 @@ fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
// const is_gt_max = @panic("TODO get max errors in compilation");
// try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code);
}
- return block.addTyOp(.bitcast, Type.anyerror, op);
+ return block.addInst(.{
+ .tag = .bitcast,
+ .data = .{ .ty_op = .{
+ .ty = Air.Inst.Ref.anyerror_type,
+ .operand = op,
+ } },
+ });
}
fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4414,7 +4546,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.bitcast, int_tag_ty, enum_tag);
+ return block.addBitCast(int_tag_ty, enum_tag);
}
fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4494,7 +4626,7 @@ fn zirOptionalPayloadPtr(
});
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, optional_ptr_ty)) |val| {
if (val.isNull()) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
@@ -4528,19 +4660,23 @@ fn zirOptionalPayload(
const src = inst_data.src();
const operand = sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const opt_type = operand_ty;
- if (opt_type.zigTypeTag() != .Optional) {
- return sema.fail(block, src, "expected optional type, found {}", .{opt_type});
- }
-
- const child_type = try opt_type.optionalChildAlloc(sema.arena);
+ const result_ty = switch (operand_ty.zigTypeTag()) {
+ .Optional => try operand_ty.optionalChildAlloc(sema.arena),
+ .Pointer => t: {
+ if (operand_ty.ptrSize() != .C) {
+ return sema.failWithExpectedOptionalType(block, src, operand_ty);
+ }
+ break :t operand_ty;
+ },
+ else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
+ };
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.isNull()) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
const sub_val = val.castTag(.opt_payload).?.data;
- return sema.addConstant(child_type, sub_val);
+ return sema.addConstant(result_ty, sub_val);
}
try sema.requireRuntimeBlock(block, src);
@@ -4548,7 +4684,7 @@ fn zirOptionalPayload(
const is_non_null = try block.addUnOp(.is_non_null, operand);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}
- return block.addTyOp(.optional_payload, child_type, operand);
+ return block.addTyOp(.optional_payload, result_ty, operand);
}
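// Editor's note: a hedged sketch (not part of this change) of the new
// C-pointer acceptance above: `.?` now unwraps a `[*c]` pointer, and the
// result type is the same pointer type.
test "unwrap a C pointer like an optional" {
    var x: i32 = 42;
    var p: [*c]i32 = &x;
    try @import("std").testing.expectEqual(@as(i32, 42), p.?[0]);
}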
/// Value in, value out
@@ -4613,7 +4749,7 @@ fn zirErrUnionPayloadPtr(
});
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
if (val.getError()) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
@@ -4672,7 +4808,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const result_ty = operand_ty.elemType().errorUnionSet();
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getError() != null);
return sema.addConstant(result_ty, val);
}
@@ -4731,7 +4867,7 @@ fn zirFunc(
body_inst,
ret_ty_body,
cc,
- Value.initTag(.null_value),
+ Value.@"null",
false,
inferred_error_set,
false,
@@ -4845,6 +4981,8 @@ fn funcCommon(
const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{
.func = new_func,
.map = .{},
+ .functions = .{},
+ .is_anyerror = false,
});
break :blk try Type.Tag.error_union.create(sema.arena, .{
.error_set = error_set_ty,
@@ -5105,16 +5243,10 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
- const lhs_src: LazySrcLoc = src; // TODO
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object = sema.resolveInst(extra.lhs);
- if (sema.typeOf(object).isSinglePointer()) {
- const result_ptr = try sema.fieldPtr(block, src, object, field_name, field_name_src);
- return sema.analyzeLoad(block, src, result_ptr, lhs_src);
- } else {
- return sema.fieldVal(block, src, object, field_name, field_name_src);
- }
+ return sema.fieldVal(block, src, object, field_name, field_name_src);
}
fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5388,11 +5520,80 @@ fn zirSwitchCapture(
const zir_datas = sema.code.instructions.items(.data);
const capture_info = zir_datas[inst].switch_capture;
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
- const src = switch_info.src();
+ const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index);
+ const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_info.src_node };
+ const switch_src = switch_info.src();
+ const operand_is_ref = switch_extra.data.bits.is_ref;
+ const cond_inst = Zir.refToIndex(switch_extra.data.operand).?;
+ const cond_info = sema.code.instructions.items(.data)[cond_inst].un_node;
+ const operand_ptr = sema.resolveInst(cond_info.operand);
+ const operand_ptr_ty = sema.typeOf(operand_ptr);
+ const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty;
+
+ if (is_multi) {
+ return sema.fail(block, switch_src, "TODO implement Sema for switch capture multi", .{});
+ }
+ const scalar_prong = switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index);
+ const item = sema.resolveInst(scalar_prong.item);
+ // Previous switch validation ensured this will succeed
+ const item_val = sema.resolveConstValue(block, .unneeded, item) catch unreachable;
- _ = is_ref;
- _ = is_multi;
- return sema.fail(block, src, "TODO implement Sema for zirSwitchCapture", .{});
+ switch (operand_ty.zigTypeTag()) {
+ .Union => {
+ const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
+ const enum_ty = union_obj.tag_ty;
+
+ const field_index_usize = enum_ty.enumTagFieldIndex(item_val).?;
+ const field_index = @intCast(u32, field_index_usize);
+ const field = union_obj.fields.values()[field_index];
+
+ // TODO handle multiple union tags which have compatible types
+
+ if (is_ref) {
+ assert(operand_is_ref);
+
+ const field_ty_ptr = try Type.ptr(sema.arena, .{
+ .pointee_type = field.ty,
+ .@"addrspace" = .generic,
+ .mutable = operand_ptr_ty.ptrIsMutable(),
+ });
+
+ if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
+ return sema.addConstant(
+ field_ty_ptr,
+ try Value.Tag.field_ptr.create(sema.arena, .{
+ .container_ptr = op_ptr_val,
+ .field_index = field_index,
+ }),
+ );
+ }
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addStructFieldPtr(operand_ptr, field_index, field_ty_ptr);
+ }
+
+ const operand = if (operand_is_ref)
+ try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src)
+ else
+ operand_ptr;
+
+ if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| {
+ return sema.addConstant(
+ field.ty,
+ operand_val.castTag(.@"union").?.data.val,
+ );
+ }
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addStructFieldVal(operand, field_index, field.ty);
+ },
+ .ErrorSet => {
+ return sema.fail(block, operand_src, "TODO implement Sema for zirSwitchCapture for error sets", .{});
+ },
+ else => {
+ return sema.fail(block, operand_src, "switch on type '{}' provides no capture value", .{
+ operand_ty,
+ });
+ },
+ }
}
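// Editor's note: an illustrative sketch (not part of this change) of the
// scalar-prong union capture implemented above: the prong's item selects the
// union field, and the capture receives that field's value.
test "switch capture on a tagged union" {
    const U = union(enum) { a: i32, b: void };
    const u = U{ .a = 42 };
    switch (u) {
        .a => |x| try @import("std").testing.expectEqual(@as(i32, 42), x),
        .b => {},
    }
}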
fn zirSwitchCaptureElse(
@@ -5407,96 +5608,104 @@ fn zirSwitchCaptureElse(
const zir_datas = sema.code.instructions.items(.data);
const capture_info = zir_datas[inst].switch_capture;
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
+ const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index).data;
const src = switch_info.src();
+ const operand_is_ref = switch_extra.bits.is_ref;
+ assert(!is_ref or operand_is_ref);
- _ = is_ref;
return sema.fail(block, src, "TODO implement Sema for zirSwitchCaptureElse", .{});
}
-fn zirSwitchBlock(
+fn zirSwitchCond(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
- special_prong: Zir.SpecialProng,
) CompileError!Air.Inst.Ref {
- const tracy = trace(@src());
- defer tracy.end();
-
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
- const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
+ const operand_ptr = sema.resolveInst(inst_data.operand);
+ const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, src) else operand_ptr;
+ const operand_ty = sema.typeOf(operand);
- const operand_ptr = sema.resolveInst(extra.data.operand);
- const operand = if (is_ref)
- try sema.analyzeLoad(block, src, operand_ptr, operand_src)
- else
- operand_ptr;
+ switch (operand_ty.zigTypeTag()) {
+ .Type,
+ .Void,
+ .Bool,
+ .Int,
+ .Float,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .EnumLiteral,
+ .Pointer,
+ .Fn,
+ .ErrorSet,
+ .Enum,
+ => {
+ if ((try sema.typeHasOnePossibleValue(block, src, operand_ty))) |opv| {
+ return sema.addConstant(operand_ty, opv);
+ }
+ return operand;
+ },
- return sema.analyzeSwitch(
- block,
- operand,
- extra.end,
- special_prong,
- extra.data.cases_len,
- 0,
- inst,
- inst_data.src_node,
- );
+ .Union => {
+ const enum_ty = operand_ty.unionTagType() orelse {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "switch on untagged union", .{});
+ errdefer msg.destroy(sema.gpa);
+ try sema.addDeclaredHereNote(msg, operand_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ };
+ return sema.unionToTag(block, enum_ty, operand, src);
+ },
+
+ .ErrorUnion,
+ .NoReturn,
+ .Array,
+ .Struct,
+ .Undefined,
+ .Null,
+ .Optional,
+ .BoundFn,
+ .Opaque,
+ .Vector,
+ .Frame,
+ .AnyFrame,
+ => return sema.fail(block, src, "switch on type '{}'", .{operand_ty}),
+ }
}
-fn zirSwitchBlockMulti(
- sema: *Sema,
- block: *Block,
- inst: Zir.Inst.Index,
- is_ref: bool,
- special_prong: Zir.SpecialProng,
-) CompileError!Air.Inst.Ref {
+fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
+ const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
- const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index);
+ const src_node_offset = inst_data.src_node;
+ const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
+ const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
+ const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
- const operand_ptr = sema.resolveInst(extra.data.operand);
- const operand = if (is_ref)
- try sema.analyzeLoad(block, src, operand_ptr, operand_src)
- else
- operand_ptr;
+ const operand = sema.resolveInst(extra.data.operand);
- return sema.analyzeSwitch(
- block,
- operand,
- extra.end,
- special_prong,
- extra.data.scalar_cases_len,
- extra.data.multi_cases_len,
- inst,
- inst_data.src_node,
- );
-}
+ var header_extra_index: usize = extra.end;
-fn analyzeSwitch(
- sema: *Sema,
- block: *Block,
- operand: Air.Inst.Ref,
- extra_end: usize,
- special_prong: Zir.SpecialProng,
- scalar_cases_len: usize,
- multi_cases_len: usize,
- switch_inst: Zir.Inst.Index,
- src_node_offset: i32,
-) CompileError!Air.Inst.Ref {
- const gpa = sema.gpa;
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = sema.code.extra[header_extra_index];
+ header_extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
+ const special_prong = extra.data.bits.specialProng();
const special: struct { body: []const Zir.Inst.Index, end: usize } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra_end },
+ .none => .{ .body = &.{}, .end = header_extra_index },
.under, .@"else" => blk: {
- const body_len = sema.code.extra[extra_end];
- const extra_body_start = extra_end + 1;
+ const body_len = sema.code.extra[header_extra_index];
+ const extra_body_start = header_extra_index + 1;
break :blk .{
.body = sema.code.extra[extra_body_start..][0..body_len],
.end = extra_body_start + body_len,
@@ -5504,9 +5713,6 @@ fn analyzeSwitch(
},
};
- const src: LazySrcLoc = .{ .node_offset = src_node_offset };
- const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
- const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
const operand_ty = sema.typeOf(operand);
// Validate usage of '_' prongs.
@@ -5900,7 +6106,7 @@ fn analyzeSwitch(
.data = undefined,
});
var label: Block.Label = .{
- .zir_block = switch_inst,
+ .zir_block = inst,
.merges = .{
.results = .{},
.br_list = .{},
@@ -6173,14 +6379,22 @@ fn analyzeSwitch(
}
var final_else_body: []const Air.Inst.Index = &.{};
- if (special.body.len != 0) {
+ if (special.body.len != 0 or !is_first) {
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = wip_captures.scope;
- _ = try sema.analyzeBody(&case_block, special.body);
+ if (special.body.len != 0) {
+ _ = try sema.analyzeBody(&case_block, special.body);
+ } else {
+ // We still need a terminator in this block, but we have proven
+ // that it is unreachable.
+ // TODO this should be a special safety panic other than unreachable, something
+ // like "panic: switch operand had corrupt value not allowed by the type"
+ try case_block.addUnreachable(src, true);
+ }
try wip_captures.finalize();
@@ -6411,10 +6625,33 @@ fn validateSwitchNoRange(
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
- _ = extra;
- const src = inst_data.src();
-
- return sema.fail(block, src, "TODO implement zirHasField", .{});
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
+ const field_name = try sema.resolveConstString(block, name_src, extra.rhs);
+ const ty = try sema.resolveTypeFields(block, ty_src, unresolved_ty);
+
+ const has_field = hf: {
+ if (ty.isSlice()) {
+ if (mem.eql(u8, field_name, "ptr")) break :hf true;
+ if (mem.eql(u8, field_name, "len")) break :hf true;
+ break :hf false;
+ }
+ break :hf switch (ty.zigTypeTag()) {
+ .Struct => ty.structFields().contains(field_name),
+ .Union => ty.unionFields().contains(field_name),
+ .Enum => ty.enumFields().contains(field_name),
+ .Array => mem.eql(u8, field_name, "len"),
+ else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
+ ty,
+ }),
+ };
+ };
+ if (has_field) {
+ return Air.Inst.Ref.bool_true;
+ } else {
+ return Air.Inst.Ref.bool_false;
+ }
}
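// Editor's note: a sketch (not part of this change) exercising the `@hasField`
// cases handled above, including the special slice and array members:
test "@hasField" {
    const std = @import("std");
    try std.testing.expect(@hasField(struct { a: i32 }, "a"));
    try std.testing.expect(@hasField([]u8, "ptr"));
    try std.testing.expect(@hasField([]u8, "len"));
    try std.testing.expect(@hasField([4]u8, "len"));
    try std.testing.expect(!@hasField(struct {}, "missing"));
}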
fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6465,6 +6702,45 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.addConstant(file_root_decl.ty, file_root_decl.val);
}
+fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const mod = sema.mod;
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
+ const name = try sema.resolveConstString(block, src, inst_data.operand);
+
+ const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) {
+ error.ImportOutsidePkgPath => {
+ return sema.fail(block, src, "embed of file outside package path: '{s}'", .{name});
+ },
+ else => {
+ // TODO: these errors are file system errors; make sure an update() will
+ // retry this and not cache the file system error, which may be transient.
+ return sema.fail(block, src, "unable to open '{s}': {s}", .{ name, @errorName(err) });
+ },
+ };
+
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
+
+ // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at
+ // a `*Module.EmbedFile`. The purpose of this would be:
+ // - If only the length is read and the bytes are not inspected by comptime code,
+ // there can be an optimization where the codegen backend does a copy_file_range
+ // into the final binary, and never loads the data into memory.
+ // - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
+ embed_file.owner_decl = try anon_decl.finish(
+ try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len),
+ try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null),
+ );
+
+ return sema.analyzeDeclRef(embed_file.owner_decl);
+}
+
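// Editor's note: usage sketch (not part of this change); the file name below is
// hypothetical. `@embedFile` yields a pointer to a null-terminated byte array,
// matching the `array_u8_sentinel_0` type created above:
//
//     const bytes = @embedFile("data.txt"); // *const [N:0]u8, read at compile time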
fn zirRetErrValueCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
_ = inst;
@@ -6627,8 +6903,42 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
- _ = inst;
- return sema.fail(block, sema.src, "TODO implement zirBitNot", .{});
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
+ const operand_src = src; // TODO put this on the operand, not the '~'
+
+ const operand = sema.resolveInst(inst_data.operand);
+ const operand_type = sema.typeOf(operand);
+ const scalar_type = operand_type.scalarType();
+
+ if (scalar_type.zigTypeTag() != .Int) {
+ return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{operand_type});
+ }
+
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ const target = sema.mod.getTarget();
+ if (val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ } else if (operand_type.zigTypeTag() == .Vector) {
+ const vec_len = operand_type.arrayLen();
+ var elem_val_buf: Value.ElemValueBuffer = undefined;
+ const elems = try sema.arena.alloc(Value, vec_len);
+ for (elems) |*elem, i| {
+ const elem_val = val.elemValueBuffer(i, &elem_val_buf);
+ elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target);
+ }
+ return sema.addConstant(
+ operand_type,
+ try Value.Tag.array.create(sema.arena, elems),
+ );
+ } else {
+ const result_val = try val.bitwiseNot(scalar_type, sema.arena, target);
+ return sema.addConstant(scalar_type, result_val);
+ }
+ }
+
+ try sema.requireRuntimeBlock(block, src);
+ return block.addTyOp(.not, operand_type, operand);
}
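// Editor's note: a minimal sketch (not part of this change) of the `~`
// semantics now implemented; the vector branch above applies it elementwise.
test "bitwise not" {
    try @import("std").testing.expectEqual(@as(u8, 0x0F), ~@as(u8, 0xF0));
}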
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6666,11 +6976,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const final_len = lhs_info.len + rhs_info.len;
const final_len_including_sent = final_len + @boolToInt(res_sent != null);
const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
+ const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+ const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const lhs_sub_val = if (is_pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
- const rhs_sub_val = if (is_pointer) (try rhs_val.pointerDeref(anon_decl.arena())).? else rhs_val;
const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
{
var i: u64 = 0;
@@ -6690,18 +7000,20 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
buf[final_len] = try rs.copy(anon_decl.arena());
break :ty try Type.Tag.array_sentinel.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = lhs_info.elem_type,
- .sentinel = rs,
+ .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()),
+ .sentinel = try rs.copy(anon_decl.arena()),
});
} else try Type.Tag.array.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = lhs_info.elem_type,
+ .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()),
});
const val = try Value.Tag.array.create(anon_decl.arena(), buf);
- return if (is_pointer)
- sema.analyzeDeclRef(try anon_decl.finish(ty, val))
- else
- sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(ty, val));
+ const decl = try anon_decl.finish(ty, val);
+ if (is_pointer) {
+ return sema.analyzeDeclRef(decl);
+ } else {
+ return sema.analyzeDeclVal(block, .unneeded, decl);
+ }
} else {
return sema.fail(block, lhs_src, "TODO runtime array_cat", .{});
}
@@ -6735,40 +7047,43 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
// In `**` rhs has to be comptime-known, but lhs can be runtime-known
- const tomulby = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
+ const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
const mulinfo = getArrayCatInfo(lhs_ty) orelse
return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
- const final_len = std.math.mul(u64, mulinfo.len, tomulby) catch
+ const final_len = std.math.mul(u64, mulinfo.len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
+ const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
const final_ty = if (mulinfo.sentinel) |sent|
try Type.Tag.array_sentinel.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = mulinfo.elem_type,
- .sentinel = sent,
+ .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()),
+ .sentinel = try sent.copy(anon_decl.arena()),
})
else
try Type.Tag.array.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = mulinfo.elem_type,
+ .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()),
});
const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
- // handles the optimisation where arr.len == 0 : [_]T { X } ** N
+ // Optimization for the common pattern of a single element repeated N times, such
+ // as zero-filling a byte array.
const val = if (mulinfo.len == 1) blk: {
- const copied_val = try (try lhs_sub_val.elemValue(sema.arena, 0)).copy(anon_decl.arena());
+ const elem_val = try lhs_sub_val.elemValue(sema.arena, 0);
+ const copied_val = try elem_val.copy(anon_decl.arena());
break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val);
} else blk: {
// The general case: copy all of the operand's elements, repeated `factor` times.
var i: u64 = 0;
- while (i < tomulby) : (i += 1) {
+ while (i < factor) : (i += 1) {
var j: u64 = 0;
while (j < mulinfo.len) : (j += 1) {
const val = try lhs_sub_val.elemValue(sema.arena, j);
@@ -6780,10 +7095,11 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
break :blk try Value.Tag.array.create(anon_decl.arena(), buf);
};
+ const decl = try anon_decl.finish(final_ty, val);
if (lhs_ty.zigTypeTag() == .Pointer) {
- return sema.analyzeDeclRef(try anon_decl.finish(final_ty, val));
+ return sema.analyzeDeclRef(decl);
} else {
- return sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(final_ty, val));
+ return sema.analyzeDeclVal(block, .unneeded, decl);
}
}
return sema.fail(block, lhs_src, "TODO runtime array_mul", .{});
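// Editor's note: a sketch (not part of this change) of the comptime `++` and
// `**` paths above, including the repeated-single-element optimization:
test "comptime array concatenation and multiplication" {
    const std = @import("std");
    const cat = [_]u8{ 1, 2 } ++ [_]u8{3};
    try std.testing.expectEqual(@as(usize, 3), cat.len);
    const zeros = [_]u8{0} ** 4; // single element repeated: stored as a `repeated` value
    try std.testing.expectEqual(@as(u8, 0), zeros[3]);
}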
@@ -6872,7 +7188,6 @@ fn analyzeArithmetic(
if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
.One, .Slice => {},
.Many, .C => {
- // Pointer arithmetic.
const op_src = src; // TODO better source location
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add => .ptr_add,
@@ -6884,24 +7199,7 @@ fn analyzeArithmetic(
.{@tagName(zir_tag)},
),
};
- // TODO if the operand is comptime-known to be negative, or is a negative int,
- // coerce to isize instead of usize.
- const casted_rhs = try sema.coerce(block, Type.usize, rhs, rhs_src);
- const runtime_src = runtime_src: {
- if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
- if (try sema.resolveDefinedValue(block, rhs_src, casted_rhs)) |rhs_val| {
- _ = lhs_val;
- _ = rhs_val;
- return sema.fail(block, src, "TODO implement Sema for comptime pointer arithmetic", .{});
- } else {
- break :runtime_src rhs_src;
- }
- } else {
- break :runtime_src lhs_src;
- }
- };
- try sema.requireRuntimeBlock(block, runtime_src);
- return block.addBinOp(air_tag, lhs, casted_rhs);
+ return analyzePtrArithmetic(sema, block, op_src, lhs, rhs, air_tag, lhs_src, rhs_src);
},
};
@@ -7121,6 +7419,9 @@ fn analyzeArithmetic(
} else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
},
.div => {
+ // TODO: emit compile error when .div is used on integers and there would be an
+ // ambiguous result between div_floor and div_trunc.
+
// For integers:
// If the lhs is zero, then zero is returned regardless of rhs.
// If the rhs is zero, compile error for division by zero.
@@ -7130,9 +7431,11 @@ fn analyzeArithmetic(
// * if lhs type is signed:
// * if rhs is comptime-known and not -1, result is undefined
// * if rhs is -1 or runtime-known, compile error because there is a
- // possible value (-min_int * -1) for which division would be
+ // possible value (-min_int / -1) for which division would be
// illegal behavior.
// * if lhs type is unsigned, undef is returned regardless of rhs.
+ // TODO: emit runtime safety for division by zero
+ //
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
@@ -7178,8 +7481,198 @@ fn analyzeArithmetic(
try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
);
}
- } else break :rs .{ .src = rhs_src, .air_tag = .div };
- } else break :rs .{ .src = lhs_src, .air_tag = .div };
+ } else {
+ if (is_int) {
+ break :rs .{ .src = rhs_src, .air_tag = .div_trunc };
+ } else {
+ break :rs .{ .src = rhs_src, .air_tag = .div_float };
+ }
+ }
+ } else {
+ if (is_int) {
+ break :rs .{ .src = lhs_src, .air_tag = .div_trunc };
+ } else {
+ break :rs .{ .src = lhs_src, .air_tag = .div_float };
+ }
+ }
+ },
+ .div_trunc => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined:
+ // * if lhs type is signed:
+ // * if rhs is comptime-known and not -1, result is undefined
+ // * if rhs is -1 or runtime-known, compile error because there is a
+ // possible value (-min_int / -1) for which division would be
+ // illegal behavior.
+ // * if lhs type is unsigned, undef is returned regardless of rhs.
+ // TODO: emit runtime safety for division by zero
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ return sema.addConstUndef(scalar_type);
+ }
+
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDiv(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div_trunc };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div_trunc };
+ },
+ .div_floor => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined:
+ // * if lhs type is signed:
+ // * if rhs is comptime-known and not -1, result is undefined
+ // * if rhs is -1 or runtime-known, compile error because there is a
+ // possible value (-min_int / -1) for which division would be
+ // illegal behavior.
+ // * if lhs type is unsigned, undef is returned regardless of rhs.
+ // TODO: emit runtime safety for division by zero
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ return sema.addConstUndef(scalar_type);
+ }
+
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDivFloor(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div_floor };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div_floor };
+ },
+ .div_exact => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, compile error because there is a possible
+ // value for which the division would result in a remainder.
+ // TODO: emit runtime safety for if there is a remainder
+ // TODO: emit runtime safety for division by zero
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, compile error because there is a possible
+ // value for which the division would result in a remainder.
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ } else {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ // TODO: emit compile error if there is a remainder
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDiv(rhs_val, sema.arena),
+ );
+ } else {
+ // TODO: emit compile error if there is a remainder
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div_exact };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div_exact };
},
.mul => {
// For integers:
@@ -7505,6 +7998,51 @@ fn analyzeArithmetic(
return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
}
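// Editor's note: a minimal sketch (not part of this change) distinguishing the
// division variants whose comptime evaluation is implemented above:
test "division variants" {
    const std = @import("std");
    try std.testing.expectEqual(@as(i32, -3), @divTrunc(@as(i32, -7), 2)); // div_trunc
    try std.testing.expectEqual(@as(i32, -4), @divFloor(@as(i32, -7), 2)); // div_floor
    try std.testing.expectEqual(@as(i32, 5), @divExact(@as(i32, 10), 2)); // div_exact
    try std.testing.expectEqual(@as(f32, -3.5), @as(f32, -7) / 2); // div_float
}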
+fn analyzePtrArithmetic(
+ sema: *Sema,
+ block: *Block,
+ op_src: LazySrcLoc,
+ ptr: Air.Inst.Ref,
+ uncasted_offset: Air.Inst.Ref,
+ air_tag: Air.Inst.Tag,
+ ptr_src: LazySrcLoc,
+ offset_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ // TODO if the operand is comptime-known to be negative, or is a negative int,
+ // coerce to isize instead of usize.
+ const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
+ // TODO adjust the return type according to alignment and other factors
+ const runtime_src = rs: {
+ if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
+ if (try sema.resolveDefinedValue(block, offset_src, offset)) |offset_val| {
+ const ptr_ty = sema.typeOf(ptr);
+ const offset_int = offset_val.toUnsignedInt();
+ const new_ptr_ty = ptr_ty; // TODO modify alignment
+ if (ptr_val.getUnsignedInt()) |addr| {
+ const target = sema.mod.getTarget();
+ const elem_ty = ptr_ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ const new_addr = switch (air_tag) {
+ .ptr_add => addr + elem_size * offset_int,
+ .ptr_sub => addr - elem_size * offset_int,
+ else => unreachable,
+ };
+ const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr);
+ return sema.addConstant(new_ptr_ty, new_ptr_val);
+ }
+ if (air_tag == .ptr_sub) {
+ return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
+ }
+ const new_ptr_val = try ptr_val.elemPtr(sema.arena, offset_int);
+ return sema.addConstant(new_ptr_ty, new_ptr_val);
+ } else break :rs offset_src;
+ } else break :rs ptr_src;
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addBinOp(air_tag, ptr, offset);
+}
+
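// Editor's note: a sketch (not part of this change) of many-pointer arithmetic;
// as in the comptime path above, offsets scale by the element size.
test "pointer arithmetic" {
    var buf = [_]u32{ 1, 2, 3, 4 };
    const p: [*]u32 = &buf;
    try @import("std").testing.expectEqual(@as(u32, 3), (p + 2)[0]);
}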
fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -7629,11 +8167,13 @@ fn zirCmpEq(
rhs_ty_tag == .Null and lhs_ty_tag == .Optional))
{
// comparing null with optionals
- const opt_operand = if (lhs_ty_tag == .Optional) lhs else rhs;
+ const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs;
return sema.analyzeIsNull(block, src, opt_operand, op == .neq);
}
if (((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) {
- return sema.fail(block, src, "TODO implement C pointer cmp", .{});
+ // comparing null with C pointers
+ const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs;
+ return sema.analyzeIsNull(block, src, opt_operand, op == .neq);
}
if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
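// Editor's note: a sketch (not part of this change) of `== null` comparisons,
// now supported for C pointers as well as optionals:
test "compare against null" {
    const std = @import("std");
    var opt: ?i32 = null;
    try std.testing.expect(opt == null);
    var cp: [*c]i32 = null;
    try std.testing.expect(cp == null);
}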
@@ -8030,7 +8570,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// return_type: ?type,
field_values[4] = try Value.Tag.ty.create(sema.arena, ty.fnReturnType());
// args: []const FnArg,
- field_values[5] = Value.initTag(.null_value); // TODO
+ field_values[5] = Value.@"null"; // TODO
return sema.addConstant(
type_info_ty,
@@ -8088,7 +8628,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_allowzero: bool,
field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false);
// sentinel: anytype,
- field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.initTag(.null_value);
+ field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";
return sema.addConstant(
type_info_ty,
@@ -8106,7 +8646,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// child: type,
field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
// sentinel: anytype,
- field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.initTag(.null_value);
+ field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";
return sema.addConstant(
type_info_ty,
@@ -8159,14 +8699,6 @@ fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.addType(operand_ty);
}
-fn zirTypeofElem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- _ = block;
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const operand_ptr = sema.resolveInst(inst_data.operand);
- const elem_ty = sema.typeOf(operand_ptr).elemType();
- return sema.addType(elem_ty);
-}
-
fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
@@ -8237,12 +8769,13 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const bool_type = Type.initTag(.bool);
const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src);
- if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
- if (val.toBool()) {
- return Air.Inst.Ref.bool_false;
- } else {
- return Air.Inst.Ref.bool_true;
- }
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ return if (val.isUndef())
+ sema.addConstUndef(bool_type)
+ else if (val.toBool())
+ Air.Inst.Ref.bool_false
+ else
+ Air.Inst.Ref.bool_true;
}
try sema.requireRuntimeBlock(block, src);
return block.addTyOp(.not, bool_type, operand);
@@ -8446,15 +8979,10 @@ fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
const src = inst_data.src();
- const safety_check = inst_data.safety;
try sema.requireRuntimeBlock(block, src);
// TODO Add compile error for @optimizeFor occurring too late in a scope.
- if (safety_check and block.wantSafety()) {
- return sema.safetyPanic(block, src, .unreach);
- } else {
- _ = try block.addNoOp(.unreach);
- return always_noreturn;
- }
+ try block.addUnreachable(src, inst_data.safety);
+ return always_noreturn;
}
fn zirRetErrValue(
@@ -8466,19 +8994,13 @@ fn zirRetErrValue(
const err_name = inst_data.get(sema.code);
const src = inst_data.src();
- // Add the error tag to the inferred error set of the in-scope function.
- if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
- if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
- _ = try payload.data.map.getOrPut(sema.gpa, err_name);
- }
- }
// Return the error code from the function.
const kv = try sema.mod.getErrorValue(err_name);
const result_inst = try sema.addConstant(
try Type.Tag.error_set_single.create(sema.arena, kv.key),
try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
);
- return sema.analyzeRet(block, result_inst, src, true);
+ return sema.analyzeRet(block, result_inst, src);
}
fn zirRetCoerce(
@@ -8493,7 +9015,7 @@ fn zirRetCoerce(
const operand = sema.resolveInst(inst_data.operand);
const src = inst_data.src();
- return sema.analyzeRet(block, operand, src, true);
+ return sema.analyzeRet(block, operand, src);
}
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -8504,11 +9026,7 @@ fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
const operand = sema.resolveInst(inst_data.operand);
const src = inst_data.src();
- // TODO: we pass false here for the `need_coercion` boolean, but I'm pretty sure we need
- // to remove this parameter entirely. Observe the problem by looking at the incorrect compile
- // error that occurs when a behavior test case being executed at comptime fails, e.g.
- // `test { comptime foo(); } fn foo() { try expect(false); }`
- return sema.analyzeRet(block, operand, src, false);
+ return sema.analyzeRet(block, operand, src);
}
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -8521,7 +9039,7 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
if (block.is_comptime or block.inlining != null) {
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
- return sema.analyzeRet(block, operand, src, false);
+ return sema.analyzeRet(block, operand, src);
}
try sema.requireRuntimeBlock(block, src);
_ = try block.addUnOp(.ret_load, ret_ptr);
@@ -8533,12 +9051,25 @@ fn analyzeRet(
block: *Block,
uncasted_operand: Air.Inst.Ref,
src: LazySrcLoc,
- need_coercion: bool,
) CompileError!Zir.Inst.Index {
- const operand = if (!need_coercion)
- uncasted_operand
- else
- try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src);
+ // Special case for returning an error to an inferred error set; we need to
+ // add the error tag to the inferred error set of the in-scope function, so
+ // that the coercion below works correctly.
+ if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
+ if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
+ const op_ty = sema.typeOf(uncasted_operand);
+ switch (op_ty.zigTypeTag()) {
+ .ErrorSet => {
+ try payload.data.addErrorSet(sema.gpa, op_ty);
+ },
+ .ErrorUnion => {
+ try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet());
+ },
+ else => {},
+ }
+ }
+ }
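+ // For illustration, a hypothetical function with an inferred error set:
+ //
+ //     fn parse(s: []const u8) !u32 {
+ //         if (s.len == 0) return error.Empty;
+ //         return 42;
+ //     }
+ //
+ // `return error.Empty` arrives here with an operand of type `error{Empty}`;
+ // addErrorSet merges it into parse's inferred set so the coercion to `!u32`
+ // below succeeds.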
+ const operand = try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src);
if (block.inlining) |inlining| {
if (block.is_comptime) {
@@ -8559,7 +9090,7 @@ fn analyzeRet(
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
// extend this switch as additional operators are implemented
return switch (tag) {
- .add, .sub, .mul, .div, .mod, .rem, .mod_rem => true,
+ .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true,
else => false,
};
}
@@ -8708,7 +9239,7 @@ fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool)
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
const field_index = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
if (found_fields[field_index] != 0) {
const other_field_type = found_fields[field_index];
const other_field_type_data = zir_datas[other_field_type].pl_node;
@@ -8789,8 +9320,9 @@ fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool)
const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node };
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
- const field_index = union_obj.fields.getIndex(field_name) orelse
+ const field_index_usize = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
if (is_ref) {
return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true union", .{});
@@ -8798,12 +9330,10 @@ fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool)
const init_inst = sema.resolveInst(item.data.init);
if (try sema.resolveMaybeUndefVal(block, field_src, init_inst)) |val| {
+ const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
return sema.addConstant(
resolved_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.int_u64.create(sema.arena, field_index),
- .val = val,
- }),
+ try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }),
);
}
return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known union values", .{});
@@ -8917,7 +9447,7 @@ fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Struct => {
const struct_obj = resolved_ty.castTag(.@"struct").?.data;
const field = struct_obj.fields.get(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, src, field_name);
return sema.addType(field.ty);
},
.Union => {
@@ -8963,8 +9493,10 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
+ const resolved_ty = try sema.resolveTypeFields(block, operand_src, ty);
+ try sema.resolveTypeLayout(block, operand_src, resolved_ty);
const target = sema.mod.getTarget();
- const abi_align = ty.abiAlignment(target);
+ const abi_align = resolved_ty.abiAlignment(target);
return sema.addIntUnsigned(Type.comptime_int, abi_align);
}
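+ // Resolving fields and layout first matters for container types; a minimal
+ // sketch (typical target assumed):
+ //
+ //     const S = extern struct { a: u8, b: u32 };
+ //     comptime {
+ //         std.debug.assert(@alignOf(S) == @alignOf(u32)); // most-aligned field wins
+ //     }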
@@ -8980,12 +9512,6 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
return block.addUnOp(.bool_to_int, operand);
}
-fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirEmbedFile", .{});
-}
-
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
@@ -9013,8 +9539,8 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
const val = try sema.resolveConstValue(block, operand_src, type_info);
const union_val = val.cast(Value.Payload.Union).?.data;
- const TypeInfoTag = std.meta.Tag(std.builtin.TypeInfo);
- const tag_index = @intCast(std.meta.Tag(TypeInfoTag), union_val.tag.toUnsignedInt());
+ const tag_ty = type_info_ty.unionTagType().?;
+ const tag_index = tag_ty.enumTagFieldIndex(union_val.tag).?;
switch (@intToEnum(std.builtin.TypeId, tag_index)) {
.Type => return Air.Inst.Ref.type_type,
.Void => return Air.Inst.Ref.void_type,
@@ -9103,8 +9629,8 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const operand = sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
- _ = try sema.checkIntType(block, ty_src, dest_ty);
- try sema.checkFloatType(block, operand_src, operand_ty);
+ try sema.checkFloatType(block, ty_src, dest_ty);
+ _ = try sema.checkIntType(block, operand_src, operand_ty);
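+ // `@intToFloat` takes a float destination type and an integer operand, hence
+ // the order of the two checks above; a usage sketch with hypothetical values:
+ //
+ //     const x: u32 = 7;
+ //     const f = @intToFloat(f64, x); // dest_ty is f64, operand_ty is u32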
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
const target = sema.mod.getTarget();
@@ -9169,7 +9695,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment);
}
}
- return block.addTyOp(.bitcast, type_res, operand_coerced);
+ return block.addBitCast(type_res, operand_coerced);
}
fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9196,10 +9722,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
@tagName(dest_ty.zigTypeTag()), dest_ty,
});
}
- if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
- return sema.addConstant(dest_ty, val);
- }
- return block.addTyOp(.bitcast, dest_ty, operand);
+ return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src);
}
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9219,14 +9742,18 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
const target = sema.mod.getTarget();
- const src_info = operand_ty.intInfo(target);
const dest_info = dest_ty.intInfo(target);
- if (src_info.bits == 0 or dest_info.bits == 0) {
- return sema.addConstant(dest_ty, Value.initTag(.zero));
+ if (dest_info.bits == 0) {
+ return sema.addConstant(dest_ty, Value.zero);
}
if (!src_is_comptime_int) {
+ const src_info = operand_ty.intInfo(target);
+ if (src_info.bits == 0) {
+ return sema.addConstant(dest_ty, Value.zero);
+ }
+
if (src_info.signedness != dest_info.signedness) {
return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
@tagName(dest_info.signedness), operand_ty,
@@ -9264,8 +9791,33 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirAlignCast", .{});
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const dest_align = try sema.resolveAlign(block, align_src, extra.lhs);
+ const ptr = sema.resolveInst(extra.rhs);
+ const ptr_ty = sema.typeOf(ptr);
+
+ // TODO in addition to pointers, this instruction is supposed to work for
+ // pointer-like optionals and slices.
+ try sema.checkPtrType(block, ptr_src, ptr_ty);
+
+ // TODO compile error if the result pointer is comptime known and would have an
+ // alignment that disagrees with the Decl's alignment.
+
+ // TODO insert safety check that the alignment is correct
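+ // The call being analyzed, sketched with hypothetical values:
+ //
+ //     const p: *align(1) u32 = @intToPtr(*align(1) u32, 0x1000);
+ //     const q = @alignCast(4, p); // q is *align(4) u32, pointee type unchanged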
+
+ const ptr_info = ptr_ty.ptrInfo().data;
+ const dest_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = ptr_info.pointee_type,
+ .@"align" = dest_align,
+ .@"addrspace" = ptr_info.@"addrspace",
+ .mutable = ptr_info.mutable,
+ .@"allowzero" = ptr_info.@"allowzero",
+ .@"volatile" = ptr_info.@"volatile",
+ .size = ptr_info.size,
+ });
+ return sema.coerceCompatiblePtrs(block, dest_ty, ptr, ptr_src);
}
fn zirClz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9340,24 +9892,6 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return sema.fail(block, src, "TODO: Sema.zirBitReverse", .{});
}
-fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirDivExact", .{});
-}
-
-fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirDivFloor", .{});
-}
-
-fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirDivTrunc", .{});
-}
-
fn zirShrExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -9385,6 +9919,18 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr
}
}
+fn checkPtrType(
+ sema: *Sema,
+ block: *Block,
+ ty_src: LazySrcLoc,
+ ty: Type,
+) CompileError!void {
+ switch (ty.zigTypeTag()) {
+ .Pointer => {},
+ else => return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty}),
+ }
+}
+
fn checkFloatType(
sema: *Sema,
block: *Block,
@@ -9648,7 +10194,8 @@ fn zirCmpxchg(
const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.ptr);
- const elem_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const elem_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
if (elem_ty.zigTypeTag() == .Float) {
return sema.fail(
@@ -9680,7 +10227,7 @@ fn zirCmpxchg(
// special case zero bit types
if ((try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) != null) {
- return sema.addConstant(result_ty, Value.initTag(.null_value));
+ return sema.addConstant(result_ty, Value.@"null");
}
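+ // The result type is an optional: `null` means the swap succeeded, otherwise
+ // the previously stored value is returned. Hypothetical usage:
+ //
+ //     var x: u32 = 5;
+ //     const prev = @cmpxchgStrong(u32, &x, 5, 6, .SeqCst, .SeqCst);
+ //     // prev == null here, and x == 6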
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
@@ -9691,10 +10238,10 @@ fn zirCmpxchg(
// to become undef as well
return sema.addConstUndef(result_ty);
}
- const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const result_val = if (stored_val.eql(expected_val, elem_ty)) blk: {
try sema.storePtr(block, src, ptr, new_value);
- break :blk Value.initTag(.null_value);
+ break :blk Value.@"null";
} else try Value.Tag.opt_payload.create(sema.arena, stored_val);
return sema.addConstant(result_ty, result_val);
@@ -9753,7 +10300,8 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.lhs);
- const elem_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const elem_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);
@@ -9774,7 +10322,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
- if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
@@ -9801,7 +10349,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.ptr);
- const operand_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const operand_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
@@ -9838,7 +10387,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
if (ptr_val.isComptimeMutablePtr()) {
const target = sema.mod.getTarget();
- const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const new_val = switch (op) {
// zig fmt: off
.Xchg => operand_val,
@@ -10152,7 +10701,7 @@ fn zirVarExtended(
// extra_index += 1;
// const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
// break :blk align_tv.val;
- //} else Value.initTag(.null_value);
+ //} else Value.@"null";
const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -10234,7 +10783,7 @@ fn zirFuncExtended(
extra_index += 1;
const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
break :blk align_tv.val;
- } else Value.initTag(.null_value);
+ } else Value.@"null";
const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
extra_index += ret_ty_body.len;
@@ -10519,7 +11068,7 @@ fn panicWithMsg(
});
const null_stack_trace = try sema.addConstant(
try Type.optional(arena, ptr_stack_trace_ty),
- Value.initTag(.null_value),
+ Value.@"null",
);
const args = try arena.create([2]Air.Inst.Ref);
args.* = .{ msg_inst, null_stack_trace };
@@ -10580,12 +11129,22 @@ fn fieldVal(
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
- switch (object_ty.zigTypeTag()) {
+ // Zig allows dereferencing a single pointer during field lookup. Note that
+ // we don't actually need to generate the dereference for some field lookups,
+ // such as the length of arrays and other comptime operations.
+ const is_pointer_to = object_ty.isSinglePointer();
+
+ const inner_ty = if (is_pointer_to)
+ object_ty.childType()
+ else
+ object_ty;
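+ // E.g. both of these (hypothetical user code) resolve the same field:
+ //
+ //     var s: struct { x: u32 } = .{ .x = 1 };
+ //     _ = s.x;    // direct access
+ //     _ = (&s).x; // the single pointer is looked through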
+
+ switch (inner_ty.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.addConstant(
Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(arena, object_ty.arrayLen()),
+ try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
);
} else {
return sema.fail(
@@ -10596,75 +11155,40 @@ fn fieldVal(
);
}
},
- .Pointer => switch (object_ty.ptrSize()) {
- .Slice => {
- if (mem.eql(u8, field_name, "ptr")) {
- const buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
- const result_ty = object_ty.slicePtrFieldType(buf);
- if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
- return sema.addConstant(result_ty, val.slicePtr());
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.slice_ptr, result_ty, object);
- } else if (mem.eql(u8, field_name, "len")) {
- const result_ty = Type.usize;
- if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
- return sema.addConstant(
- result_ty,
- try Value.Tag.int_u64.create(arena, val.sliceLen()),
- );
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.slice_len, result_ty, object);
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- .One => {
- const ptr_child = object_ty.elemType();
- switch (ptr_child.zigTypeTag()) {
- .Array => {
- if (mem.eql(u8, field_name, "len")) {
- return sema.addConstant(
- Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(arena, ptr_child.arrayLen()),
- );
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- .Struct => {
- const struct_ptr_deref = try sema.analyzeLoad(block, src, object, object_src);
- return sema.unionFieldVal(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child);
- },
- .Union => {
- const union_ptr_deref = try sema.analyzeLoad(block, src, object, object_src);
- return sema.unionFieldVal(block, src, union_ptr_deref, field_name, field_name_src, ptr_child);
- },
- else => {},
- }
- },
- .Many, .C => {},
+ .Pointer => if (inner_ty.isSlice()) {
+ if (mem.eql(u8, field_name, "ptr")) {
+ const slice = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+ return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src);
+ } else if (mem.eql(u8, field_name, "len")) {
+ const slice = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+ return sema.analyzeSliceLen(block, src, slice);
+ } else {
+ return sema.fail(
+ block,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
},
.Type => {
- const val = (try sema.resolveDefinedValue(block, object_src, object)).?;
+ const dereffed_type = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+
+ const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
var to_type_buffer: Value.ToTypeBuffer = undefined;
const child_type = val.toType(&to_type_buffer);
+
switch (child_type.zigTypeTag()) {
.ErrorSet => {
- // TODO resolve inferred error sets
const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
const error_set = payload.data;
// TODO this is O(N). I'm putting off solving this until we solve inferred
@@ -10685,10 +11209,39 @@ fn fieldVal(
try Value.Tag.@"error".create(arena, .{ .name = name }),
);
},
- .Struct, .Opaque, .Union => {
+ .Union => {
if (child_type.getNamespace()) |namespace| {
- if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
- return sema.analyzeLoad(block, src, inst, src);
+ if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
+ return inst;
+ }
+ }
+ if (child_type.unionTagType()) |enum_ty| {
+ if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
+ return sema.addConstant(
+ enum_ty,
+ try Value.Tag.enum_field_index.create(sema.arena, field_index),
+ );
+ }
+ }
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
+ },
+ .Enum => {
+ if (child_type.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
+ return inst;
+ }
+ }
+ const field_index_usize = child_type.enumFieldIndex(field_name) orelse
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
+ const enum_val = try Value.Tag.enum_field_index.create(arena, field_index);
+ return sema.addConstant(try child_type.copy(arena), enum_val);
+ },
+ .Struct, .Opaque => {
+ if (child_type.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
+ return inst;
}
}
// TODO add note: declared here
@@ -10702,40 +11255,23 @@ fn fieldVal(
kw_name, child_type, field_name,
});
},
- .Enum => {
- if (child_type.getNamespace()) |namespace| {
- if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
- return sema.analyzeLoad(block, src, inst, src);
- }
- }
- const field_index = child_type.enumFieldIndex(field_name) orelse {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "enum '{}' has no member named '{s}'",
- .{ child_type, field_name },
- );
- errdefer msg.destroy(sema.gpa);
- try sema.mod.errNoteNonLazy(
- child_type.declSrcLoc(),
- msg,
- "enum declared here",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- };
- const field_index_u32 = @intCast(u32, field_index);
- const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32);
- return sema.addConstant(try child_type.copy(arena), enum_val);
- },
else => return sema.fail(block, src, "type '{}' has no members", .{child_type}),
}
},
- .Struct => return sema.structFieldVal(block, src, object, field_name, field_name_src, object_ty),
- .Union => return sema.unionFieldVal(block, src, object, field_name, field_name_src, object_ty),
+ .Struct => if (is_pointer_to) {
+ // Avoid loading the entire struct by fetching a pointer and loading that
+ const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty);
+ return sema.analyzeLoad(block, src, field_ptr, object_src);
+ } else {
+ return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty);
+ },
+ .Union => if (is_pointer_to) {
+ // Avoid loading the entire union by fetching a pointer and loading that
+ const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty);
+ return sema.analyzeLoad(block, src, field_ptr, object_src);
+ } else {
+ return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty);
+ },
else => {},
}
return sema.fail(block, src, "type '{}' does not support field access", .{object_ty});
@@ -10758,14 +11294,25 @@ fn fieldPtr(
.Pointer => object_ptr_ty.elemType(),
else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty}),
};
- switch (object_ty.zigTypeTag()) {
+
+ // Zig allows dereferencing a single pointer during field lookup. Note that
+ // we don't actually need to generate the dereference for some field lookups,
+ // such as the length of arrays and other comptime operations.
+ const is_pointer_to = object_ty.isSinglePointer();
+
+ const inner_ty = if (is_pointer_to)
+ object_ty.childType()
+ else
+ object_ty;
+
+ switch (inner_ty.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(anon_decl.arena(), object_ty.arrayLen()),
+ try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
));
} else {
return sema.fail(
@@ -10776,77 +11323,74 @@ fn fieldPtr(
);
}
},
- .Pointer => switch (object_ty.ptrSize()) {
- .Slice => {
- // Here for the ptr and len fields what we need to do is the situation
- // when a temporary has its address taken, e.g. `&a[c..d].len`.
- // This value may be known at compile-time or runtime. In the former
- // case, it should create an anonymous Decl and return a decl_ref to it.
- // In the latter case, it should add an `alloc` instruction, store
- // the runtime value to it, and then return the `alloc`.
- // In both cases the pointer should be const.
- if (mem.eql(u8, field_name, "ptr")) {
- return sema.fail(
- block,
- field_name_src,
- "TODO: implement reference to 'ptr' field of slice '{}'",
- .{object_ty},
- );
- } else if (mem.eql(u8, field_name, "len")) {
- return sema.fail(
- block,
- field_name_src,
- "TODO: implement reference to 'len' field of slice '{}'",
- .{object_ty},
- );
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
+ .Pointer => if (inner_ty.isSlice()) {
+ const inner_ptr = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
+ else
+ object_ptr;
+
+ if (mem.eql(u8, field_name, "ptr")) {
+ const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
+ const slice_ptr_ty = inner_ty.slicePtrFieldType(buf);
+
+ if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ try slice_ptr_ty.copy(anon_decl.arena()),
+ try val.slicePtr().copy(anon_decl.arena()),
+ ));
}
- },
- .One => {
- const ptr_child = object_ty.elemType();
- switch (ptr_child.zigTypeTag()) {
- .Array => {
- if (mem.eql(u8, field_name, "len")) {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
- return sema.analyzeDeclRef(try anon_decl.finish(
- Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(anon_decl.arena(), ptr_child.arrayLen()),
- ));
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- .Struct => {
- const struct_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
- return sema.structFieldPtr(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child);
- },
- .Union => {
- const union_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
- return sema.unionFieldPtr(block, src, union_ptr_deref, field_name, field_name_src, ptr_child);
- },
- else => {},
+ try sema.requireRuntimeBlock(block, src);
+
+ const result_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = slice_ptr_ty,
+ .mutable = object_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
+ });
+
+ return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
+ } else if (mem.eql(u8, field_name, "len")) {
+ if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ Type.usize,
+ try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen()),
+ ));
}
- },
- .Many, .C => {},
+ try sema.requireRuntimeBlock(block, src);
+
+ const result_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = Type.usize,
+ .mutable = object_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
+ });
+
+ return block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr);
+ } else {
+ return sema.fail(
+ block,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
},
.Type => {
_ = try sema.resolveConstValue(block, object_ptr_src, object_ptr);
const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
- const val = (sema.resolveDefinedValue(block, src, result) catch unreachable).?;
+ const inner = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, result, object_ptr_src)
+ else
+ result;
+
+ const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
var to_type_buffer: Value.ToTypeBuffer = undefined;
const child_type = val.toType(&to_type_buffer);
+
switch (child_type.zigTypeTag()) {
.ErrorSet => {
// TODO resolve inferred error sets
@@ -10872,22 +11416,24 @@ fn fieldPtr(
try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }),
));
},
- .Struct, .Opaque, .Union => {
+ .Union => {
if (child_type.getNamespace()) |namespace| {
if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
return inst;
}
}
- // TODO add note: declared here
- const kw_name = switch (child_type.zigTypeTag()) {
- .Struct => "struct",
- .Opaque => "opaque",
- .Union => "union",
- else => unreachable,
- };
- return sema.fail(block, src, "{s} '{}' has no member named '{s}'", .{
- kw_name, child_type, field_name,
- });
+ if (child_type.unionTagType()) |enum_ty| {
+ if (enum_ty.enumFieldIndex(field_name)) |field_index| {
+ const field_index_u32 = @intCast(u32, field_index);
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ try enum_ty.copy(anon_decl.arena()),
+ try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
+ ));
+ }
+ }
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
.Enum => {
if (child_type.getNamespace()) |namespace| {
@@ -10896,23 +11442,7 @@ fn fieldPtr(
}
}
const field_index = child_type.enumFieldIndex(field_name) orelse {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "enum '{}' has no member named '{s}'",
- .{ child_type, field_name },
- );
- errdefer msg.destroy(sema.gpa);
- try sema.mod.errNoteNonLazy(
- child_type.declSrcLoc(),
- msg,
- "enum declared here",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
};
const field_index_u32 = @intCast(u32, field_index);
var anon_decl = try block.startAnonDecl();
@@ -10922,14 +11452,34 @@ fn fieldPtr(
try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
));
},
+ .Struct, .Opaque => {
+ if (child_type.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
+ return inst;
+ }
+ }
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
+ },
else => return sema.fail(block, src, "type '{}' has no members", .{child_type}),
}
},
- .Struct => return sema.structFieldPtr(block, src, object_ptr, field_name, field_name_src, object_ty),
- .Union => return sema.unionFieldPtr(block, src, object_ptr, field_name, field_name_src, object_ty),
+ .Struct => {
+ const inner_ptr = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
+ else
+ object_ptr;
+ return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty);
+ },
+ .Union => {
+ const inner_ptr = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
+ else
+ object_ptr;
+ return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty);
+ },
else => {},
}
- return sema.fail(block, src, "type '{}' does not support field access", .{object_ty});
+ return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty, object_ptr_ty, field_name });
}
fn fieldCallBind(
@@ -11084,6 +11634,17 @@ fn namespaceLookupRef(
return try sema.analyzeDeclRef(decl);
}
+fn namespaceLookupVal(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ namespace: *Namespace,
+ decl_name: []const u8,
+) CompileError!?Air.Inst.Ref {
+ const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
+ return try sema.analyzeDeclVal(block, src, decl);
+}
+
fn structFieldPtr(
sema: *Sema,
block: *Block,
@@ -11101,7 +11662,7 @@ fn structFieldPtr(
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_index_big = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index = @intCast(u32, field_index_big);
const field = struct_obj.fields.values()[field_index];
const ptr_field_ty = try Type.ptr(arena, .{
@@ -11138,8 +11699,9 @@ fn structFieldVal(
const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const field_index = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
+ const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
const field = struct_obj.fields.values()[field_index];
if (try sema.resolveMaybeUndefVal(block, src, struct_byval)) |struct_val| {
@@ -11150,16 +11712,7 @@ fn structFieldVal(
}
try sema.requireRuntimeBlock(block, src);
- return block.addInst(.{
- .tag = .struct_field_val,
- .data = .{ .ty_pl = .{
- .ty = try sema.addType(field.ty),
- .payload = try sema.addExtra(Air.StructField{
- .struct_operand = struct_byval,
- .field_index = @intCast(u32, field_index),
- }),
- } },
- });
+ return block.addStructFieldVal(struct_byval, field_index, field.ty);
}
fn unionFieldPtr(
@@ -11218,9 +11771,9 @@ fn unionFieldVal(
const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- const field_index = union_obj.fields.getIndex(field_name) orelse
+ const field_index_usize = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
-
+ const field_index = @intCast(u32, field_index_usize);
const field = union_obj.fields.values()[field_index];
if (try sema.resolveMaybeUndefVal(block, src, union_byval)) |union_val| {
@@ -11232,7 +11785,7 @@ fn unionFieldVal(
}
try sema.requireRuntimeBlock(block, src);
- return sema.fail(block, src, "TODO implement runtime union field access", .{});
+ return block.addStructFieldVal(union_byval, field_index, field.ty);
}
fn elemPtr(
@@ -11250,135 +11803,124 @@ fn elemPtr(
else => return sema.fail(block, array_ptr_src, "expected pointer, found '{}'", .{array_ptr_ty}),
};
if (!array_ty.isIndexable()) {
- return sema.fail(block, src, "array access of non-array type '{}'", .{array_ty});
- }
- if (array_ty.isSinglePointer() and array_ty.elemType().zigTypeTag() == .Array) {
- // we have to deref the ptr operand to get the actual array pointer
- const array_ptr_deref = try sema.analyzeLoad(block, src, array_ptr, array_ptr_src);
- return sema.elemPtrArray(block, src, array_ptr_deref, elem_index, elem_index_src);
- }
- if (array_ty.zigTypeTag() == .Array) {
- return sema.elemPtrArray(block, src, array_ptr, elem_index, elem_index_src);
+ return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
}
- return sema.fail(block, src, "TODO implement more analyze elemptr", .{});
+ switch (array_ty.zigTypeTag()) {
+ .Pointer => {
+ // In all the cases below, we have to deref the ptr operand to get the actual array pointer.
+ const array = try sema.analyzeLoad(block, array_ptr_src, array_ptr, array_ptr_src);
+ const result_ty = try array_ty.elemPtrType(sema.arena);
+ switch (array_ty.ptrSize()) {
+ .Slice => {
+ const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ const runtime_src = if (maybe_slice_val) |slice_val| rs: {
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt());
+ const elem_ptr = try slice_val.elemPtr(sema.arena, index);
+ return sema.addConstant(result_ty, elem_ptr);
+ } else array_ptr_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addSliceElemPtr(array, elem_index, result_ty);
+ },
+ .Many, .C => {
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+
+ const runtime_src = rs: {
+ const ptr_val = maybe_ptr_val orelse break :rs array_ptr_src;
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt());
+ const elem_ptr = try ptr_val.elemPtr(sema.arena, index);
+ return sema.addConstant(result_ty, elem_ptr);
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addPtrElemPtr(array, elem_index, result_ty);
+ },
+ .One => {
+ assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
+ return sema.elemPtrArray(block, array_ptr_src, array, elem_index, elem_index_src);
+ },
+ }
+ },
+ .Array => return sema.elemPtrArray(block, array_ptr_src, array_ptr, elem_index, elem_index_src),
+ .Vector => return sema.fail(block, src, "TODO implement Sema for elemPtr for vector", .{}),
+ else => unreachable,
+ }
}
fn elemVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- array_maybe_ptr: Air.Inst.Ref,
+ array: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const array_ptr_src = src; // TODO better source location
- const maybe_ptr_ty = sema.typeOf(array_maybe_ptr);
- switch (maybe_ptr_ty.zigTypeTag()) {
- .Pointer => switch (maybe_ptr_ty.ptrSize()) {
+ const array_src = src; // TODO better source location
+ const array_ty = sema.typeOf(array);
+
+ if (!array_ty.isIndexable()) {
+ return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
+ }
+
+ switch (array_ty.zigTypeTag()) {
+ .Pointer => switch (array_ty.ptrSize()) {
.Slice => {
- const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array_maybe_ptr);
+ const maybe_slice_val = try sema.resolveDefinedValue(block, array_src, array);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
const runtime_src = if (maybe_slice_val) |slice_val| rs: {
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt());
const elem_val = try slice_val.elemValue(sema.arena, index);
- return sema.addConstant(maybe_ptr_ty.elemType2(), elem_val);
- } else array_ptr_src;
+ return sema.addConstant(array_ty.elemType2(), elem_val);
+ } else array_src;
try sema.requireRuntimeBlock(block, runtime_src);
- return block.addBinOp(.slice_elem_val, array_maybe_ptr, elem_index);
+ return block.addBinOp(.slice_elem_val, array, elem_index);
},
.Many, .C => {
- if (try sema.resolveDefinedValue(block, src, array_maybe_ptr)) |ptr_val| {
- _ = ptr_val;
- return sema.fail(block, src, "TODO implement Sema for elemVal for comptime known pointer", .{});
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_elem_val, array_maybe_ptr, elem_index);
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, array_src, array);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+
+ const runtime_src = rs: {
+ const ptr_val = maybe_ptr_val orelse break :rs array_src;
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt());
+ const maybe_array_val = try sema.pointerDeref(block, array_src, ptr_val, array_ty);
+ const array_val = maybe_array_val orelse break :rs array_src;
+ const elem_val = try array_val.elemValue(sema.arena, index);
+ return sema.addConstant(array_ty.elemType2(), elem_val);
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addBinOp(.ptr_elem_val, array, elem_index);
},
.One => {
- const indexable_ty = maybe_ptr_ty.childType();
- switch (indexable_ty.zigTypeTag()) {
- .Pointer => switch (indexable_ty.ptrSize()) {
- .Slice => {
- // We have a pointer to a slice and we want an element value.
- if (try sema.isComptimeKnown(block, src, array_maybe_ptr)) {
- const slice = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
- if (try sema.resolveDefinedValue(block, src, slice)) |slice_val| {
- _ = slice_val;
- return sema.fail(block, src, "TODO implement Sema for elemVal for comptime known slice", .{});
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.slice_elem_val, slice, elem_index);
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_slice_elem_val, array_maybe_ptr, elem_index);
- },
- .Many, .C => {
- // We have a pointer to a pointer and we want an element value.
- if (try sema.isComptimeKnown(block, src, array_maybe_ptr)) {
- const ptr = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
- if (try sema.resolveDefinedValue(block, src, ptr)) |ptr_val| {
- _ = ptr_val;
- return sema.fail(block, src, "TODO implement Sema for elemVal for comptime known pointer", .{});
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_elem_val, ptr, elem_index);
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_ptr_elem_val, array_maybe_ptr, elem_index);
- },
- .One => {
- const array_ty = indexable_ty.childType();
- if (array_ty.zigTypeTag() == .Array) {
- // We have a double pointer to an array, and we want an element
- // value. This can happen with this code for example:
- // var a: *[1]u8 = undefined; _ = a[0];
- const array_ptr = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
- const ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
- return sema.analyzeLoad(block, src, ptr, elem_index_src);
- } else return sema.fail(
- block,
- array_ptr_src,
- "expected pointer, found '{}'",
- .{array_ty},
- );
- },
- },
- .Array => {
- const ptr = try sema.elemPtr(block, src, array_maybe_ptr, elem_index, elem_index_src);
- return sema.analyzeLoad(block, src, ptr, elem_index_src);
- },
- else => return sema.fail(
- block,
- array_ptr_src,
- "expected pointer, found '{}'",
- .{indexable_ty},
- ),
- }
+ assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
+ const elem_ptr = try sema.elemPtr(block, array_src, array, elem_index, elem_index_src);
+ return sema.analyzeLoad(block, array_src, elem_ptr, elem_index_src);
},
},
.Array => {
- if (try sema.resolveMaybeUndefVal(block, src, array_maybe_ptr)) |array_val| {
- const elem_ty = maybe_ptr_ty.childType();
- const opt_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ if (try sema.resolveMaybeUndefVal(block, array_src, array)) |array_val| {
+ const elem_ty = array_ty.childType();
if (array_val.isUndef()) return sema.addConstUndef(elem_ty);
- if (opt_index_val) |index_val| {
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ if (maybe_index_val) |index_val| {
const index = @intCast(usize, index_val.toUnsignedInt());
const elem_val = try array_val.elemValue(sema.arena, index);
return sema.addConstant(elem_ty, elem_val);
}
}
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.array_elem_val, array_maybe_ptr, elem_index);
+ try sema.requireRuntimeBlock(block, array_src);
+ return block.addBinOp(.array_elem_val, array, elem_index);
},
- else => return sema.fail(
- block,
- array_ptr_src,
- "expected pointer or array; found '{}'",
- .{maybe_ptr_ty},
- ),
+ .Vector => return sema.fail(block, array_src, "TODO implement Sema for elemVal for vector", .{}),
+ else => unreachable,
}
}
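+ // With the Many/C branch above, a comptime-known pointer can now be indexed
+ // at comptime; a sketch (hypothetical user code):
+ //
+ //     comptime {
+ //         const arr = [_]u8{ 1, 2, 3 };
+ //         const p: [*]const u8 = &arr;
+ //         std.debug.assert(p[1] == 2); // pointerDeref + elemValue
+ //     }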
@@ -11391,12 +11933,7 @@ fn elemPtrArray(
elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const array_ptr_ty = sema.typeOf(array_ptr);
- const pointee_type = array_ptr_ty.elemType().elemType();
- const result_ty = try Type.ptr(sema.arena, .{
- .pointee_type = pointee_type,
- .mutable = array_ptr_ty.ptrIsMutable(),
- .@"addrspace" = array_ptr_ty.ptrAddressSpace(),
- });
+ const result_ty = try array_ptr_ty.elemPtrType(sema.arena);
if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| {
if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| {
@@ -11410,16 +11947,7 @@ fn elemPtrArray(
}
// TODO safety check for array bounds
try sema.requireRuntimeBlock(block, src);
- return block.addInst(.{
- .tag = .ptr_elem_ptr,
- .data = .{ .ty_pl = .{
- .ty = try sema.addType(result_ty),
- .payload = try sema.addExtra(Air.Bin{
- .lhs = array_ptr,
- .rhs = elem_index,
- }),
- } },
- });
+ return block.addPtrElemPtr(array_ptr, elem_index, result_ty);
}
fn coerce(
@@ -11452,7 +11980,7 @@ fn coerce(
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src);
- return block.addTyOp(.bitcast, dest_ty, inst);
+ return block.addBitCast(dest_ty, inst);
}
// undefined to anything
@@ -11464,6 +11992,8 @@ fn coerce(
assert(inst_ty.zigTypeTag() != .Undefined);
// comptime known number to other number
+ // TODO why is this a separate function? should just be flattened into the
+ // switch expression below.
if (try sema.coerceNum(block, dest_ty, inst, inst_src)) |some|
return some;
@@ -11471,16 +12001,17 @@ fn coerce(
.Optional => {
// null to ?T
if (inst_ty.zigTypeTag() == .Null) {
- return sema.addConstant(dest_ty, Value.initTag(.null_value));
+ return sema.addConstant(dest_ty, Value.@"null");
}
// T to ?T
- var buf: Type.Payload.ElemType = undefined;
- const child_type = dest_ty.optionalChild(&buf);
+ const child_type = try dest_ty.optionalChildAlloc(sema.arena);
const intermediate = try sema.coerce(block, child_type, inst, inst_src);
return sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
.Pointer => {
+ const dest_info = dest_ty.ptrInfo().data;
+
// Function body to function pointer.
if (inst_ty.zigTypeTag() == .Fn) {
const fn_val = try sema.resolveConstValue(block, inst_src, inst);
@@ -11489,49 +12020,98 @@ fn coerce(
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
+ // *T to *[1]T
+ single_item: {
+ if (dest_info.size != .One) break :single_item;
+ if (!inst_ty.isSinglePointer()) break :single_item;
+ const ptr_elem_ty = inst_ty.childType();
+ const array_ty = dest_info.pointee_type;
+ if (array_ty.zigTypeTag() != .Array) break :single_item;
+ const array_elem_ty = array_ty.childType();
+ const dest_is_mut = dest_info.mutable;
+ if (inst_ty.isConstPtr() and dest_is_mut) break :single_item;
+ if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item;
+ if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item;
+ switch (coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) {
+ .ok => {},
+ .no_match => break :single_item,
+ }
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
+ }
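+ // E.g. (hypothetical user code):
+ //
+ //     var x: u8 = 5;
+ //     const p: *[1]u8 = &x; // *u8 coerces to *[1]u8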
+
// Coercions where the source is a single pointer to an array.
src_array_ptr: {
if (!inst_ty.isSinglePointer()) break :src_array_ptr;
- const array_type = inst_ty.elemType();
- if (array_type.zigTypeTag() != .Array) break :src_array_ptr;
- const array_elem_type = array_type.elemType();
- const dest_is_mut = !dest_ty.isConstPtr();
+ const array_ty = inst_ty.childType();
+ if (array_ty.zigTypeTag() != .Array) break :src_array_ptr;
+ const array_elem_type = array_ty.childType();
+ const dest_is_mut = dest_info.mutable;
if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr;
- if (inst_ty.isVolatilePtr() and !dest_ty.isVolatilePtr()) break :src_array_ptr;
- if (inst_ty.ptrAddressSpace() != dest_ty.ptrAddressSpace()) break :src_array_ptr;
+ if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :src_array_ptr;
+ if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr;
- const dst_elem_type = dest_ty.elemType();
+ const dst_elem_type = dest_info.pointee_type;
switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) {
.ok => {},
.no_match => break :src_array_ptr,
}
- switch (dest_ty.ptrSize()) {
+ switch (dest_info.size) {
.Slice => {
// *[N]T to []T
return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
},
.C => {
// *[N]T to [*c]T
- return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
},
.Many => {
// *[N]T to [*]T
// *[N:s]T to [*:s]T
// *[N:s]T to [*]T
- if (dest_ty.sentinel()) |dst_sentinel| {
- if (array_type.sentinel()) |src_sentinel| {
+ if (dest_info.sentinel) |dst_sentinel| {
+ if (array_ty.sentinel()) |src_sentinel| {
if (src_sentinel.eql(dst_sentinel, dst_elem_type)) {
- return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
}
} else {
- return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
},
.One => {},
}
}
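+ // The cases above, as hypothetical user code:
+ //
+ //     var arr = [3]u8{ 1, 2, 3 };
+ //     const s: []u8 = &arr;   // *[3]u8 to []u8
+ //     const m: [*]u8 = &arr;  // *[3]u8 to [*]u8
+ //     const c: [*c]u8 = &arr; // *[3]u8 to [*c]u8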
+
+ // coercion to C pointer
+ if (dest_info.size == .C) {
+ switch (inst_ty.zigTypeTag()) {
+ .Null => {
+ return sema.addConstant(dest_ty, Value.@"null");
+ },
+ .ComptimeInt => {
+ const addr = try sema.coerce(block, Type.usize, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
+ },
+ .Int => {
+ const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) {
+ .signed => Type.isize,
+ .unsigned => Type.usize,
+ };
+ const addr = try sema.coerce(block, ptr_size_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
+ },
+ else => {},
+ }
+ }
+
+ // cast from *T and [*]T to *c_void
+ // but don't do it if the source type is a double pointer
+ if (dest_info.pointee_type.tag() == .c_void and inst_ty.zigTypeTag() == .Pointer and
+ inst_ty.childType().zigTypeTag() != .Pointer)
+ {
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
+ }
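+ // Sketches of the two coercions above (hypothetical values):
+ //
+ //     const c: [*c]const u8 = 0xDEAD; // integer coerces via usize, as above
+ //     var x: u32 = 1;
+ //     const p: *c_void = &x; // single-level pointer to *c_void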
},
.Int => {
// integer widening
@@ -11605,21 +12185,41 @@ fn coerce(
// T to E!T or E to E!T
return sema.wrapErrorUnion(block, dest_ty, inst, inst_src);
},
- .ErrorSet => {
- // Coercion to `anyerror`.
- // TODO If the dest type tag is not `anyerror` it still could
- // resolve to anyerror. `dest_ty` needs to have inferred error set resolution
- // happen before this check.
- if (dest_ty.tag() == .anyerror and inst_ty.zigTypeTag() == .ErrorSet) {
- return sema.coerceErrSetToAnyError(block, inst, inst_src);
- }
+ .ErrorSet => switch (inst_ty.zigTypeTag()) {
+ .ErrorSet => {
+ // Coercion to `anyerror`. Note that this check can return false positives
+ // if the error sets have not yet been resolved.
+ if (dest_ty.isAnyError()) {
+ return sema.coerceCompatibleErrorSets(block, inst, inst_src);
+ }
+ // If both are inferred error sets of functions, and
+ // the dest includes the source function, the coercion is OK.
+ // This check is important because it works without forcing a full resolution
+ // of inferred error sets.
+ if (inst_ty.castTag(.error_set_inferred)) |src_payload| {
+ if (dest_ty.castTag(.error_set_inferred)) |dst_payload| {
+ const src_func = src_payload.data.func;
+ const dst_func = dst_payload.data.func;
+
+ if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) {
+ return sema.coerceCompatibleErrorSets(block, inst, inst_src);
+ }
+ }
+ }
+ // TODO full error set resolution and compare sets by names.
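+ // E.g. (hypothetical user code) any error set coerces to anyerror:
+ //
+ //     const E = error{ Foo, Bar };
+ //     const err: anyerror = E.Foo;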
+ },
+ else => {},
},
.Union => switch (inst_ty.zigTypeTag()) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
.Array => switch (inst_ty.zigTypeTag()) {
- .Vector => return sema.coerceVectorToArray(block, dest_ty, dest_ty_src, inst, inst_src),
+ .Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
+ else => {},
+ },
+ .Vector => switch (inst_ty.zigTypeTag()) {
+ .Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
else => {},
@@ -12064,6 +12664,121 @@ fn beginComptimePtrMutation(
}
}
+const ComptimePtrLoadKit = struct {
+ /// The Value of the Decl that owns this memory.
+ root_val: Value,
+ /// Parent Value.
+ val: Value,
+ /// The Type of the parent Value.
+ ty: Type,
+ /// The starting byte offset of `val` from `root_val`.
+ byte_offset: usize,
+ /// Whether `root_val` could be mutated by further semantic analysis,
+ /// in which case a copy must be performed before use.
+ is_mutable: bool,
+};
+
+const ComptimePtrLoadError = CompileError || error{
+ RuntimeLoad,
+};
+
+fn beginComptimePtrLoad(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ptr_val: Value,
+) ComptimePtrLoadError!ComptimePtrLoadKit {
+ const target = sema.mod.getTarget();
+ switch (ptr_val.tag()) {
+ .decl_ref => {
+ const decl = ptr_val.castTag(.decl_ref).?.data;
+ const decl_val = try decl.value();
+ if (decl_val.tag() == .variable) return error.RuntimeLoad;
+ return ComptimePtrLoadKit{
+ .root_val = decl_val,
+ .val = decl_val,
+ .ty = decl.ty,
+ .byte_offset = 0,
+ .is_mutable = false,
+ };
+ },
+ .decl_ref_mut => {
+ const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
+ const decl_val = try decl.value();
+ if (decl_val.tag() == .variable) return error.RuntimeLoad;
+ return ComptimePtrLoadKit{
+ .root_val = decl_val,
+ .val = decl_val,
+ .ty = decl.ty,
+ .byte_offset = 0,
+ .is_mutable = true,
+ };
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr);
+ const elem_ty = parent.ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
+ .ty = elem_ty,
+ .byte_offset = parent.byte_offset + elem_size * elem_ptr.index,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr);
+ const field_index = @intCast(u32, field_ptr.field_index);
+ try sema.resolveTypeLayout(block, src, parent.ty);
+ const field_offset = parent.ty.structFieldOffset(field_index, target);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = try parent.val.fieldValue(sema.arena, field_index),
+ .ty = parent.ty.structFieldType(field_index),
+ .byte_offset = parent.byte_offset + field_offset,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .eu_payload_ptr => {
+ const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = parent.val.castTag(.eu_payload).?.data,
+ .ty = parent.ty.errorUnionPayload(),
+ .byte_offset = undefined,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .opt_payload_ptr => {
+ const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = parent.val.castTag(.opt_payload).?.data,
+ .ty = try parent.ty.optionalChildAlloc(sema.arena),
+ .byte_offset = undefined,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+
+ .zero,
+ .one,
+ .int_u64,
+ .int_i64,
+ .int_big_positive,
+ .int_big_negative,
+ .variable,
+ .extern_fn,
+ .function,
+ => return error.RuntimeLoad,
+
+ else => unreachable,
+ }
+}
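+
+ // A comptime load this machinery supports, as hypothetical user code:
+ //
+ //     comptime {
+ //         var pair = [2]u16{ 10, 20 };
+ //         const p = &pair[1]; // an elem_ptr over a decl_ref_mut
+ //         std.debug.assert(p.* == 20);
+ //     }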
+
fn bitCast(
sema: *Sema,
block: *Block,
@@ -12079,7 +12794,7 @@ fn bitCast(
return sema.addConstant(dest_ty, result_val);
}
try sema.requireRuntimeBlock(block, inst_src);
- return block.addTyOp(.bitcast, dest_ty, inst);
+ return block.addBitCast(dest_ty, inst);
}
fn coerceArrayPtrToSlice(
@@ -12090,21 +12805,26 @@ fn coerceArrayPtrToSlice(
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
- // The comptime Value representation is compatible with both types.
- return sema.addConstant(dest_ty, val);
+ const ptr_array_ty = sema.typeOf(inst);
+ const array_ty = ptr_array_ty.childType();
+ const slice_val = try Value.Tag.slice.create(sema.arena, .{
+ .ptr = val,
+ .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
+ });
+ return sema.addConstant(dest_ty, slice_val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.array_to_slice, dest_ty, inst);
}
-fn coerceArrayPtrToMany(
+fn coerceCompatiblePtrs(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
+ if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_ty, val);
}
@@ -12187,7 +12907,7 @@ fn coerceEnumToUnion(
// If the union has all fields 0 bits, the union value is just the enum value.
if (union_ty.unionHasAllZeroBitFieldTypes()) {
- return block.addTyOp(.bitcast, union_ty, enum_tag);
+ return block.addBitCast(union_ty, enum_tag);
}
// TODO resolve the field names and add a hint that says "field 'foo' has type 'bar'"
@@ -12203,49 +12923,52 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(msg);
}
-fn coerceVectorToArray(
+// Coerces vectors and arrays that share the same in-memory layout. This handles
+// coercion in both directions, from and to vectors.
+fn coerceVectorInMemory(
sema: *Sema,
block: *Block,
- array_ty: Type,
- array_ty_src: LazySrcLoc,
- vector: Air.Inst.Ref,
- vector_src: LazySrcLoc,
+ dest_ty: Type,
+ dest_ty_src: LazySrcLoc,
+ inst: Air.Inst.Ref,
+ inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const vector_ty = sema.typeOf(vector);
- const array_len = array_ty.arrayLen();
- const vector_len = vector_ty.arrayLen();
- if (array_len != vector_len) {
+ const inst_ty = sema.typeOf(inst);
+ const inst_len = inst_ty.arrayLen();
+ const dest_len = dest_ty.arrayLen();
+
+ if (dest_len != inst_len) {
const msg = msg: {
- const msg = try sema.errMsg(block, vector_src, "expected {}, found {}", .{
- array_ty, vector_ty,
+ const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
+ dest_ty, inst_ty,
});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, array_ty_src, msg, "array has length {d}", .{array_len});
- try sema.errNote(block, vector_src, msg, "vector has length {d}", .{vector_len});
+ try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
+ try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const target = sema.mod.getTarget();
- const array_elem_ty = array_ty.childType();
- const vector_elem_ty = vector_ty.childType();
- const in_memory_result = coerceInMemoryAllowed(array_elem_ty, vector_elem_ty, false, target);
+ const dest_elem_ty = dest_ty.childType();
+ const inst_elem_ty = inst_ty.childType();
+ const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target);
if (in_memory_result != .ok) {
// TODO recursive error notes for coerceInMemoryAllowed failure
- return sema.fail(block, vector_src, "expected {}, found {}", .{ array_ty, vector_ty });
+ return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
}
- if (try sema.resolveMaybeUndefVal(block, vector_src, vector)) |vector_val| {
+ if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| {
// These types share the same comptime value representation.
- return sema.addConstant(array_ty, vector_val);
+ return sema.addConstant(dest_ty, inst_val);
}
- try sema.requireRuntimeBlock(block, vector_src);
- return block.addTyOp(.bitcast, array_ty, vector);
+ try sema.requireRuntimeBlock(block, inst_src);
+ return block.addBitCast(dest_ty, inst);
}
-fn coerceErrSetToAnyError(
+fn coerceCompatibleErrorSets(
sema: *Sema,
block: *Block,
err_set: Air.Inst.Ref,
@@ -12256,7 +12979,13 @@ fn coerceErrSetToAnyError(
return sema.addConstant(Type.anyerror, err_set_val);
}
try sema.requireRuntimeBlock(block, err_set_src);
- return block.addTyOp(.bitcast, Type.anyerror, err_set);
+ return block.addInst(.{
+ .tag = .bitcast,
+ .data = .{ .ty_op = .{
+ .ty = Air.Inst.Ref.anyerror_type,
+ .operand = err_set,
+ } },
+ });
}
fn analyzeDeclVal(
@@ -12296,10 +13025,15 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
const decl_tv = try decl.typedValue();
if (decl_tv.val.castTag(.variable)) |payload| {
const variable = payload.data;
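+        // A null align_val means no explicit alignment was requested;
+        // an alignment of 0 selects the type's ABI alignment.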
+ const alignment: u32 = if (decl.align_val.tag() == .null_value)
+ 0
+ else
+ @intCast(u32, decl.align_val.toUnsignedInt());
const ty = try Type.ptr(sema.arena, .{
.pointee_type = decl_tv.ty,
.mutable = variable.is_mutable,
.@"addrspace" = decl.@"addrspace",
+ .@"align" = alignment,
});
return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl));
}
@@ -12361,7 +13095,7 @@ fn analyzeLoad(
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}),
};
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
- if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
@@ -12370,6 +13104,25 @@ fn analyzeLoad(
return block.addTyOp(.load, elem_ty, ptr);
}
+fn analyzeSlicePtr(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ slice: Air.Inst.Ref,
+ slice_ty: Type,
+ slice_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
+ const result_ty = slice_ty.slicePtrFieldType(buf);
+
+ if (try sema.resolveMaybeUndefVal(block, slice_src, slice)) |val| {
+ if (val.isUndef()) return sema.addConstUndef(result_ty);
+ return sema.addConstant(result_ty, val.slicePtr());
+ }
+ try sema.requireRuntimeBlock(block, src);
+ return block.addTyOp(.slice_ptr, result_ty, slice);
+}
+
fn analyzeSliceLen(
sema: *Sema,
block: *Block,
@@ -12441,74 +13194,128 @@ fn analyzeSlice(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- array_ptr: Air.Inst.Ref,
- start: Air.Inst.Ref,
- end_opt: Air.Inst.Ref,
+ ptr_ptr: Air.Inst.Ref,
+ uncasted_start: Air.Inst.Ref,
+ uncasted_end_opt: Air.Inst.Ref,
sentinel_opt: Air.Inst.Ref,
sentinel_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const array_ptr_ty = sema.typeOf(array_ptr);
- const ptr_child = switch (array_ptr_ty.zigTypeTag()) {
- .Pointer => array_ptr_ty.elemType(),
- else => return sema.fail(block, src, "expected pointer, found '{}'", .{array_ptr_ty}),
+ const ptr_src = src; // TODO better source location
+ const start_src = src; // TODO better source location
+ const end_src = src; // TODO better source location
+    // Slice expressions can operate on a variable whose type is an array. This requires
+    // the slice operand to be a pointer; for a non-array operand, it will be a double pointer.
+ const ptr_ptr_ty = sema.typeOf(ptr_ptr);
+ const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) {
+ .Pointer => ptr_ptr_ty.elemType(),
+ else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty}),
};
- var array_type = ptr_child;
- const elem_type = switch (ptr_child.zigTypeTag()) {
- .Array => ptr_child.elemType(),
- .Pointer => blk: {
- if (ptr_child.isSinglePointer()) {
- if (ptr_child.elemType().zigTypeTag() == .Array) {
- array_type = ptr_child.elemType();
- break :blk ptr_child.elemType().elemType();
+ var array_ty = ptr_ptr_child_ty;
+ var slice_ty = ptr_ptr_ty;
+ var ptr_or_slice = ptr_ptr;
+ var elem_ty = ptr_ptr_child_ty.childType();
+ switch (ptr_ptr_child_ty.zigTypeTag()) {
+ .Array => {},
+ .Pointer => {
+ if (ptr_ptr_child_ty.isSinglePointer()) {
+ const double_child_ty = ptr_ptr_child_ty.childType();
+ if (double_child_ty.zigTypeTag() == .Array) {
+ ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
+ slice_ty = ptr_ptr_child_ty;
+ array_ty = double_child_ty;
+ elem_ty = double_child_ty.childType();
+ } else {
+ return sema.fail(block, ptr_src, "slice of single-item pointer", .{});
}
-
- return sema.fail(block, src, "slice of single-item pointer", .{});
}
- break :blk ptr_child.elemType();
},
- else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_child}),
+ else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty}),
+ }
+ const ptr = if (slice_ty.isSlice())
+ try sema.analyzeSlicePtr(block, src, ptr_or_slice, slice_ty, ptr_src)
+ else
+ ptr_or_slice;
+
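+    // The resulting slice begins at `ptr + start`; its length, computed below, is `end - start`.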
+ const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
+ const new_ptr = try analyzePtrArithmetic(sema, block, src, ptr, start, .ptr_add, ptr_src, start_src);
+
+ const end = e: {
+ if (uncasted_end_opt != .none) {
+ break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ }
+
+ if (array_ty.zigTypeTag() == .Array) {
+ break :e try sema.addConstant(
+ Type.usize,
+ try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
+ );
+ } else if (slice_ty.isSlice()) {
+ break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
+ }
+ return sema.fail(block, end_src, "slice of pointer must include end value", .{});
};
const slice_sentinel = if (sentinel_opt != .none) blk: {
- const casted = try sema.coerce(block, elem_type, sentinel_opt, sentinel_src);
+ const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
break :blk try sema.resolveConstValue(block, sentinel_src, casted);
} else null;
- var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
- var return_elem_type = elem_type;
- if (end_opt != .none) {
- if (try sema.resolveDefinedValue(block, src, end_opt)) |end_val| {
- if (try sema.resolveDefinedValue(block, src, start)) |start_val| {
- const start_u64 = start_val.toUnsignedInt();
- const end_u64 = end_val.toUnsignedInt();
- if (start_u64 > end_u64) {
- return sema.fail(block, src, "out of bounds slice", .{});
- }
+ const new_len = try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src);
- const len = end_u64 - start_u64;
- const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
- array_type.sentinel()
- else
- slice_sentinel;
- return_elem_type = try Type.array(sema.arena, len, array_sentinel, elem_type);
- return_ptr_size = .One;
- }
+ const opt_new_ptr_val = try sema.resolveDefinedValue(block, ptr_src, new_ptr);
+ const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
+
+ const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data;
+
+ if (opt_new_len_val) |new_len_val| {
+ const new_len_int = new_len_val.toUnsignedInt();
+
+ const sentinel = if (array_ty.zigTypeTag() == .Array and new_len_int == array_ty.arrayLen())
+ array_ty.sentinel()
+ else
+ slice_sentinel;
+
+ const return_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty),
+ .sentinel = null,
+ .@"align" = new_ptr_ty_info.@"align",
+ .@"addrspace" = new_ptr_ty_info.@"addrspace",
+ .mutable = new_ptr_ty_info.mutable,
+ .@"allowzero" = new_ptr_ty_info.@"allowzero",
+ .@"volatile" = new_ptr_ty_info.@"volatile",
+ .size = .One,
+ });
+
+ if (opt_new_ptr_val) |new_ptr_val| {
+ return sema.addConstant(return_ty, new_ptr_val);
+ } else {
+ return block.addBitCast(return_ty, new_ptr);
}
}
- const return_type = try Type.ptr(sema.arena, .{
- .pointee_type = return_elem_type,
- .sentinel = if (end_opt == .none) slice_sentinel else null,
- .@"align" = 0, // TODO alignment
- .@"addrspace" = if (ptr_child.zigTypeTag() == .Pointer) ptr_child.ptrAddressSpace() else .generic,
- .mutable = !ptr_child.isConstPtr(),
- .@"allowzero" = ptr_child.isAllowzeroPtr(),
- .@"volatile" = ptr_child.isVolatilePtr(),
- .size = return_ptr_size,
+
+ const return_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = elem_ty,
+ .sentinel = slice_sentinel,
+ .@"align" = new_ptr_ty_info.@"align",
+ .@"addrspace" = new_ptr_ty_info.@"addrspace",
+ .mutable = new_ptr_ty_info.mutable,
+ .@"allowzero" = new_ptr_ty_info.@"allowzero",
+ .@"volatile" = new_ptr_ty_info.@"volatile",
+ .size = .Slice,
});
- _ = return_type;
- return sema.fail(block, src, "TODO implement analysis of slice", .{});
+ try sema.requireRuntimeBlock(block, src);
+ return block.addInst(.{
+ .tag = .slice,
+ .data = .{ .ty_pl = .{
+ .ty = try sema.addType(return_ty),
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = new_ptr,
+ .rhs = new_len,
+ }),
+ } },
+ });
}
/// Asserts that lhs and rhs types are both numeric.
@@ -12732,47 +13539,30 @@ fn wrapErrorUnion(
}
switch (dest_err_set_ty.tag()) {
.anyerror => {},
- .error_set_single => {
+ .error_set_single => ok: {
const expected_name = val.castTag(.@"error").?.data.name;
const n = dest_err_set_ty.castTag(.error_set_single).?.data;
- if (!mem.eql(u8, expected_name, n)) {
- return sema.fail(
- block,
- inst_src,
- "expected type '{}', found type '{}'",
- .{ dest_err_set_ty, inst_ty },
- );
- }
+ if (mem.eql(u8, expected_name, n)) break :ok;
+ return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
- .error_set => {
+ .error_set => ok: {
const expected_name = val.castTag(.@"error").?.data.name;
const error_set = dest_err_set_ty.castTag(.error_set).?.data;
const names = error_set.names_ptr[0..error_set.names_len];
// TODO this is O(N). I'm putting off solving this until we solve inferred
// error sets at the same time.
- const found = for (names) |name| {
- if (mem.eql(u8, expected_name, name)) break true;
- } else false;
- if (!found) {
- return sema.fail(
- block,
- inst_src,
- "expected type '{}', found type '{}'",
- .{ dest_err_set_ty, inst_ty },
- );
+ for (names) |name| {
+ if (mem.eql(u8, expected_name, name)) break :ok;
}
+ return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
- .error_set_inferred => {
+ .error_set_inferred => ok: {
+ const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data;
+ if (err_set_payload.is_anyerror) break :ok;
const expected_name = val.castTag(.@"error").?.data.name;
- const map = &dest_err_set_ty.castTag(.error_set_inferred).?.data.map;
- if (!map.contains(expected_name)) {
- return sema.fail(
- block,
- inst_src,
- "expected type '{}', found type '{}'",
- .{ dest_err_set_ty, inst_ty },
- );
- }
+ if (err_set_payload.map.contains(expected_name)) break :ok;
+ // TODO error set resolution here before emitting a compile error
+ return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
else => unreachable,
}
@@ -12794,15 +13584,18 @@ fn wrapErrorUnion(
fn unionToTag(
sema: *Sema,
block: *Block,
- dest_ty: Type,
+ enum_ty: Type,
un: Air.Inst.Ref,
un_src: LazySrcLoc,
) !Air.Inst.Ref {
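+    // If the tag type has only one possible value, no runtime operation is needed.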
+ if ((try sema.typeHasOnePossibleValue(block, un_src, enum_ty))) |opv| {
+ return sema.addConstant(enum_ty, opv);
+ }
if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| {
- return sema.addConstant(dest_ty, un_val.unionTag());
+ return sema.addConstant(enum_ty, un_val.unionTag());
}
try sema.requireRuntimeBlock(block, un_src);
- return block.addTyOp(.get_union_tag, dest_ty, un);
+ return block.addTyOp(.get_union_tag, enum_ty, un);
}
fn resolvePeerTypes(
@@ -12831,114 +13624,127 @@ fn resolvePeerTypes(
const candidate_ty_tag = candidate_ty.zigTypeTag();
const chosen_ty_tag = chosen_ty.zigTypeTag();
- if (candidate_ty_tag == .NoReturn)
- continue;
- if (chosen_ty_tag == .NoReturn) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (candidate_ty_tag == .Undefined)
- continue;
- if (chosen_ty_tag == .Undefined) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (chosen_ty.isInt() and
- candidate_ty.isInt() and
- chosen_ty.isSignedInt() == candidate_ty.isSignedInt())
- {
- if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- }
- continue;
- }
- if (chosen_ty.isRuntimeFloat() and candidate_ty.isRuntimeFloat()) {
- if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- }
- continue;
- }
-
- if (chosen_ty_tag == .ComptimeInt and candidate_ty.isInt()) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
-
- if (chosen_ty.isInt() and candidate_ty_tag == .ComptimeInt) {
- continue;
- }
-
- if ((chosen_ty_tag == .ComptimeFloat or chosen_ty_tag == .ComptimeInt) and
- candidate_ty.isRuntimeFloat())
- {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (chosen_ty.isRuntimeFloat() and
- (candidate_ty_tag == .ComptimeFloat or candidate_ty_tag == .ComptimeInt))
- {
- continue;
- }
-
- if (chosen_ty_tag == .Enum and candidate_ty_tag == .EnumLiteral) {
- continue;
- }
- if (chosen_ty_tag == .EnumLiteral and candidate_ty_tag == .Enum) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
+ switch (candidate_ty_tag) {
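+            // noreturn and undefined coerce to any other type, so such a
+            // candidate never replaces the chosen type.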
+ .NoReturn, .Undefined => continue,
- if (chosen_ty_tag == .ComptimeFloat and candidate_ty_tag == .ComptimeInt)
- continue;
- if (chosen_ty_tag == .ComptimeInt and candidate_ty_tag == .ComptimeFloat) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
+ .Null => {
+ any_are_null = true;
+ continue;
+ },
- if (chosen_ty_tag == .Null) {
- any_are_null = true;
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (candidate_ty_tag == .Null) {
- any_are_null = true;
- continue;
+ .Int => switch (chosen_ty_tag) {
+ .ComptimeInt => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ .Int => {
+ if (chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) {
+ if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ }
+ continue;
+ }
+ },
+ .Pointer => if (chosen_ty.ptrSize() == .C) continue,
+ else => {},
+ },
+ .ComptimeInt => switch (chosen_ty_tag) {
+ .Int, .Float, .ComptimeFloat => continue,
+ .Pointer => if (chosen_ty.ptrSize() == .C) continue,
+ else => {},
+ },
+ .Float => switch (chosen_ty_tag) {
+ .Float => {
+ if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ }
+ continue;
+ },
+ .ComptimeFloat, .ComptimeInt => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ else => {},
+ },
+ .ComptimeFloat => switch (chosen_ty_tag) {
+ .Float => continue,
+ .ComptimeInt => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ else => {},
+ },
+ .Enum => switch (chosen_ty_tag) {
+ .EnumLiteral => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ else => {},
+ },
+ .EnumLiteral => switch (chosen_ty_tag) {
+ .Enum => continue,
+ else => {},
+ },
+ .Pointer => {
+ if (candidate_ty.ptrSize() == .C) {
+ if (chosen_ty_tag == .Int or chosen_ty_tag == .ComptimeInt) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
+ if (chosen_ty_tag == .Pointer and chosen_ty.ptrSize() != .Slice) {
+ continue;
+ }
+ }
+ },
+ .Optional => {
+ var opt_child_buf: Type.Payload.ElemType = undefined;
+ const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
+ if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
+ if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) {
+ any_are_null = true;
+ continue;
+ }
+ },
+ else => {},
}
- if (chosen_ty_tag == .Optional) {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
- if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) {
- continue;
- }
- if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) {
- any_are_null = true;
+ switch (chosen_ty_tag) {
+ .NoReturn, .Undefined => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
- }
- }
- if (candidate_ty_tag == .Optional) {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
- if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) {
+ },
+ .Null => {
+ any_are_null = true;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
- }
- if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) {
- any_are_null = true;
- continue;
- }
+ },
+ .Optional => {
+ var opt_child_buf: Type.Payload.ElemType = undefined;
+ const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
+ if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) {
+ continue;
+ }
+ if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) {
+ any_are_null = true;
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
+ },
+ else => {},
}
// At this point, we hit a compile error. We need to recover
@@ -13020,6 +13826,19 @@ pub fn resolveTypeLayout(
}
union_obj.status = .have_layout;
},
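+        // For these wrapper types, resolving the layout reduces to resolving
+        // the layout of the child/payload type.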
+ .Array => {
+ const elem_ty = ty.childType();
+ return sema.resolveTypeLayout(block, src, elem_ty);
+ },
+ .Optional => {
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = ty.optionalChild(&buf);
+ return sema.resolveTypeLayout(block, src, payload_ty);
+ },
+ .ErrorUnion => {
+ const payload_ty = ty.errorUnionPayload();
+ return sema.resolveTypeLayout(block, src, payload_ty);
+ },
else => {},
}
}
@@ -13572,10 +14391,9 @@ fn typeHasOnePossibleValue(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- starting_type: Type,
+ ty: Type,
) CompileError!?Value {
- var ty = starting_type;
- while (true) switch (ty.tag()) {
+ switch (ty.tag()) {
.f16,
.f32,
.f64,
@@ -13669,7 +14487,7 @@ fn typeHasOnePossibleValue(
const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;
if (enum_obj.fields.count() == 1) {
if (enum_obj.values.count() == 0) {
- return Value.initTag(.zero); // auto-numbered
+ return Value.zero; // auto-numbered
} else {
return enum_obj.values.keys()[0];
}
@@ -13682,7 +14500,7 @@ fn typeHasOnePossibleValue(
const enum_obj = resolved_ty.castTag(.enum_full).?.data;
if (enum_obj.fields.count() == 1) {
if (enum_obj.values.count() == 0) {
- return Value.initTag(.zero); // auto-numbered
+ return Value.zero; // auto-numbered
} else {
return enum_obj.values.keys()[0];
}
@@ -13694,12 +14512,19 @@ fn typeHasOnePossibleValue(
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const enum_simple = resolved_ty.castTag(.enum_simple).?.data;
if (enum_simple.fields.count() == 1) {
- return Value.initTag(.zero);
+ return Value.zero;
+ } else {
+ return null;
+ }
+ },
+ .enum_nonexhaustive => {
+ const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
+ if (!tag_ty.hasCodeGenBits()) {
+ return Value.zero;
} else {
return null;
}
},
- .enum_nonexhaustive => ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty,
.@"union" => {
return null; // TODO
},
@@ -13710,12 +14535,12 @@ fn typeHasOnePossibleValue(
.empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
.void => return Value.void,
.noreturn => return Value.initTag(.unreachable_value),
- .@"null" => return Value.initTag(.null_value),
+ .@"null" => return Value.@"null",
.@"undefined" => return Value.initTag(.undef),
.int_unsigned, .int_signed => {
if (ty.cast(Type.Payload.Bits).?.data == 0) {
- return Value.initTag(.zero);
+ return Value.zero;
} else {
return null;
}
@@ -13723,14 +14548,16 @@ fn typeHasOnePossibleValue(
.vector, .array, .array_u8 => {
if (ty.arrayLen() == 0)
return Value.initTag(.empty_array);
- ty = ty.elemType();
- continue;
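+            // A nonempty array or vector has exactly one possible value
+            // if and only if its element type does.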
+ if ((try sema.typeHasOnePossibleValue(block, src, ty.elemType())) != null) {
+ return Value.initTag(.the_only_possible_value);
+ }
+ return null;
},
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.generic_poison => return error.GenericPoison,
- };
+ }
}
fn getAstTree(sema: *Sema, block: *Block) CompileError!*const std.zig.Ast {
@@ -13934,14 +14761,22 @@ fn analyzeComptimeAlloc(
sema: *Sema,
block: *Block,
var_type: Type,
+ alignment: u32,
) CompileError!Air.Inst.Ref {
const ptr_type = try Type.ptr(sema.arena, .{
.pointee_type = var_type,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
+ .@"align" = alignment,
});
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
+
+ const align_val = if (alignment == 0)
+ Value.@"null"
+ else
+ try Value.Tag.int_u64.create(anon_decl.arena(), alignment);
+
const decl = try anon_decl.finish(
try var_type.copy(anon_decl.arena()),
// There will be stores before the first load, but they may be to sub-elements or
@@ -13949,6 +14784,8 @@ fn analyzeComptimeAlloc(
// into fields/elements and have those overridden with stored values.
Value.undef,
);
+ decl.align_val = align_val;
+
try sema.mod.declareDeclDependency(sema.owner_decl, decl);
return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
.runtime_index = block.runtime_index,
@@ -14010,3 +14847,45 @@ pub fn analyzeAddrspace(
return address_space;
}
+
+/// Asserts the value is a pointer and dereferences it.
+/// Returns `null` if the pointer contents cannot be loaded at comptime.
+fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
+ const target = sema.mod.getTarget();
+ const load_ty = ptr_ty.childType();
+ const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) {
+ error.RuntimeLoad => return null,
+ else => |e| return e,
+ };
+ // We have a Value that lines up in virtual memory exactly with what we want to load.
+    // If the Type is in-memory coercible to `load_ty`, it may be returned without modification.
+ const coerce_in_mem_ok =
+ coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or
+ coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok;
+ if (coerce_in_mem_ok) {
+ if (parent.is_mutable) {
+ // The decl whose value we are obtaining here may be overwritten with
+ // a different value upon further semantic analysis, which would
+ // invalidate this memory. So we must copy here.
+ return try parent.val.copy(sema.arena);
+ }
+ return parent.val;
+ }
+
+    // The type is not in-memory coercible, so it must be bitcast according
+    // to the pointer type the load is performed through.
+
+    // TODO emit a compile error if the types are not allowed to be bitcast
+
+ if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) {
+        // The Type it is stored as in the compiler has an ABI size greater than or equal to
+ // the ABI size of `load_ty`. We may perform the bitcast based on
+ // `parent.val` alone (more efficient).
+ return try parent.val.bitCast(parent.ty, load_ty, target, sema.gpa, sema.arena);
+ }
+
+ // The Type it is stored as in the compiler has an ABI size less than the ABI size
+ // of `load_ty`. The bitcast must be performed based on the `parent.root_val`
+ // and reinterpreted starting at `parent.byte_offset`.
+ return sema.fail(block, src, "TODO: implement bitcast with index offset", .{});
+}
diff --git a/src/Zir.zig b/src/Zir.zig
index e45aac1a6f..7e5937e40d 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -72,6 +72,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
i32 => @bitCast(i32, code.extra[i]),
Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
+ Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@@ -239,10 +240,6 @@ pub const Inst = struct {
/// Reinterpret the memory representation of a value as a different type.
/// Uses the pl_node field with payload `Bin`.
bitcast,
- /// A typed result location pointer is bitcasted to a new result location pointer.
- /// The new result location pointer has an inferred type.
- /// Uses the pl_node field with payload `Bin`.
- bitcast_result_ptr,
/// Bitwise NOT. `~`
/// Uses `un_node`.
bit_not,
@@ -481,6 +478,7 @@ pub const Inst = struct {
/// Includes a token source location.
/// Uses the `un_tok` union field.
/// The operand needs to get coerced to the function's return type.
+ /// TODO rename this to `ret_tok` because coercion is now done unconditionally in Sema.
ret_coerce,
/// Sends control flow back to the function's callee.
/// The return operand is `error.foo` where `foo` is given by the string.
@@ -546,9 +544,6 @@ pub const Inst = struct {
/// Returns the type of a value.
/// Uses the `un_node` field.
typeof,
- /// Given a value which is a pointer, returns the element type.
- /// Uses the `un_node` field.
- typeof_elem,
/// Given a value, look at the type of it, which must be an integer type.
/// Returns the integer type for the RHS of a shift operation.
/// Uses the `un_node` field.
@@ -618,39 +613,16 @@ pub const Inst = struct {
enum_literal,
/// A switch expression. Uses the `pl_node` union field.
/// AST node is the switch, payload is `SwitchBlock`.
- /// All prongs of target handled.
switch_block,
- /// Same as switch_block, except one or more prongs have multiple items.
- /// Payload is `SwitchBlockMulti`
- switch_block_multi,
- /// Same as switch_block, except has an else prong.
- switch_block_else,
- /// Same as switch_block_else, except one or more prongs have multiple items.
- /// Payload is `SwitchBlockMulti`
- switch_block_else_multi,
- /// Same as switch_block, except has an underscore prong.
- switch_block_under,
- /// Same as switch_block, except one or more prongs have multiple items.
- /// Payload is `SwitchBlockMulti`
- switch_block_under_multi,
- /// Same as `switch_block` but the target is a pointer to the value being switched on.
- switch_block_ref,
- /// Same as `switch_block_multi` but the target is a pointer to the value being switched on.
- /// Payload is `SwitchBlockMulti`
- switch_block_ref_multi,
- /// Same as `switch_block_else` but the target is a pointer to the value being switched on.
- switch_block_ref_else,
- /// Same as `switch_block_else_multi` but the target is a pointer to the
- /// value being switched on.
- /// Payload is `SwitchBlockMulti`
- switch_block_ref_else_multi,
- /// Same as `switch_block_under` but the target is a pointer to the value
- /// being switched on.
- switch_block_ref_under,
- /// Same as `switch_block_under_multi` but the target is a pointer to
- /// the value being switched on.
- /// Payload is `SwitchBlockMulti`
- switch_block_ref_under_multi,
+ /// Produces the value that will be switched on. For example, for
+ /// integers, it returns the integer with no modifications. For tagged unions, it
+ /// returns the active enum tag.
+ /// Uses the `un_node` union field.
+ switch_cond,
+ /// Same as `switch_cond`, except the input operand is a pointer to
+ /// what will be switched on.
+ /// Uses the `un_node` union field.
+ switch_cond_ref,
/// Produces the capture value for a switch prong.
/// Uses the `switch_capture` field.
switch_capture,
@@ -998,7 +970,6 @@ pub const Inst = struct {
.as_node,
.bit_and,
.bitcast,
- .bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
@@ -1071,7 +1042,6 @@ pub const Inst = struct {
.negate,
.negate_wrap,
.typeof,
- .typeof_elem,
.xor,
.optional_type,
.optional_payload_safe,
@@ -1109,17 +1079,8 @@ pub const Inst = struct {
.switch_capture_else,
.switch_capture_else_ref,
.switch_block,
- .switch_block_multi,
- .switch_block_else,
- .switch_block_else_multi,
- .switch_block_under,
- .switch_block_under_multi,
- .switch_block_ref,
- .switch_block_ref_multi,
- .switch_block_ref_else,
- .switch_block_ref_else_multi,
- .switch_block_ref_under,
- .switch_block_ref_under_multi,
+ .switch_cond,
+ .switch_cond_ref,
.validate_struct_init,
.validate_array_init,
.struct_init_empty,
@@ -1265,7 +1226,6 @@ pub const Inst = struct {
.as_node = .pl_node,
.bit_and = .pl_node,
.bitcast = .pl_node,
- .bitcast_result_ptr = .pl_node,
.bit_not = .un_node,
.bit_or = .pl_node,
.block = .pl_node,
@@ -1348,7 +1308,6 @@ pub const Inst = struct {
.negate = .un_node,
.negate_wrap = .un_node,
.typeof = .un_node,
- .typeof_elem = .un_node,
.typeof_log2_int_type = .un_node,
.log2_int_type = .un_node,
.@"unreachable" = .@"unreachable",
@@ -1367,17 +1326,8 @@ pub const Inst = struct {
.ensure_err_payload_void = .un_tok,
.enum_literal = .str_tok,
.switch_block = .pl_node,
- .switch_block_multi = .pl_node,
- .switch_block_else = .pl_node,
- .switch_block_else_multi = .pl_node,
- .switch_block_under = .pl_node,
- .switch_block_under_multi = .pl_node,
- .switch_block_ref = .pl_node,
- .switch_block_ref_multi = .pl_node,
- .switch_block_ref_else = .pl_node,
- .switch_block_ref_else_multi = .pl_node,
- .switch_block_ref_under = .pl_node,
- .switch_block_ref_under_multi = .pl_node,
+ .switch_cond = .un_node,
+ .switch_cond_ref = .un_node,
.switch_capture = .switch_capture,
.switch_capture_ref = .switch_capture,
.switch_capture_multi = .switch_capture,
@@ -2466,37 +2416,17 @@ pub const Inst = struct {
index: u32,
};
- /// This form is supported when there are no ranges, and exactly 1 item per block.
- /// Depending on zir tag and len fields, extra fields trail
- /// this one in the extra array.
- /// 0. else_body { // If the tag has "_else" or "_under" in it.
+ /// 0. multi_cases_len: u32 // If has_multi_cases is set.
+ /// 1. else_body { // If has_else or has_under is set.
/// body_len: u32,
/// body member Index for every body_len
/// }
- /// 1. cases: {
+ /// 2. scalar_cases: { // for every scalar_cases_len
/// item: Ref,
/// body_len: u32,
/// body member Index for every body_len
- /// } for every cases_len
- pub const SwitchBlock = struct {
- operand: Ref,
- cases_len: u32,
- };
-
- /// This form is required when there exists a block which has more than one item,
- /// or a range.
- /// Depending on zir tag and len fields, extra fields trail
- /// this one in the extra array.
- /// 0. else_body { // If the tag has "_else" or "_under" in it.
- /// body_len: u32,
- /// body member Index for every body_len
/// }
- /// 1. scalar_cases: { // for every scalar_cases_len
- /// item: Ref,
- /// body_len: u32,
- /// body member Index for every body_len
- /// }
- /// 2. multi_cases: { // for every multi_cases_len
+ /// 3. multi_cases: { // for every multi_cases_len
/// items_len: u32,
/// ranges_len: u32,
/// body_len: u32,
@@ -2507,10 +2437,88 @@ pub const Inst = struct {
/// }
/// body member Index for every body_len
/// }
- pub const SwitchBlockMulti = struct {
+ pub const SwitchBlock = struct {
+ /// This is always a `switch_cond` or `switch_cond_ref` instruction.
+ /// If it is a `switch_cond_ref` instruction, bits.is_ref is always true.
+ /// If it is a `switch_cond` instruction, bits.is_ref is always false.
+        /// Both `switch_cond` and `switch_cond_ref` return a value, not a pointer;
+        /// that value is suitable for the case items but cannot be used for capture values.
+        /// For capture values, Sema is expected to find the operand of this operand
+        /// and use that instead.
operand: Ref,
- scalar_cases_len: u32,
- multi_cases_len: u32,
+ bits: Bits,
+
+ pub const Bits = packed struct {
+ /// If true, one or more prongs have multiple items.
+ has_multi_cases: bool,
+ /// If true, there is an else prong. This is mutually exclusive with `has_under`.
+ has_else: bool,
+ /// If true, there is an underscore prong. This is mutually exclusive with `has_else`.
+ has_under: bool,
+ /// If true, the `operand` is a pointer to the value being switched on.
+            /// TODO this flag is redundant with the tag of `operand` and can be removed.
+ is_ref: bool,
+ scalar_cases_len: ScalarCasesLen,
+
+ pub const ScalarCasesLen = u28;
+
+ pub fn specialProng(bits: Bits) SpecialProng {
+ const has_else: u2 = @boolToInt(bits.has_else);
+ const has_under: u2 = @boolToInt(bits.has_under);
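+                // The two flags are mutually exclusive, so 0b11 can never occur.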
+ return switch ((has_else << 1) | has_under) {
+ 0b00 => .none,
+ 0b01 => .under,
+ 0b10 => .@"else",
+ 0b11 => unreachable,
+ };
+ }
+ };
+
+ pub const ScalarProng = struct {
+ item: Ref,
+ body: []const Index,
+ };
+
+        /// TODO performance optimization: instead of having this helper method,
+        /// change the definition of the switch_capture instruction to store extra_index
+        /// instead of prong_index. This way, Sema won't be doing O(N^2) iterations
+        /// over the switch prongs.
+ pub fn getScalarProng(
+ self: SwitchBlock,
+ zir: Zir,
+ extra_end: usize,
+ prong_index: usize,
+ ) ScalarProng {
+ var extra_index: usize = extra_end;
+
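+            // Walk the trailing data in the order documented on `SwitchBlock`:
+            // the optional multi_cases_len, then the special prong body, then
+            // the scalar prongs.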
+ if (self.bits.has_multi_cases) {
+ extra_index += 1;
+ }
+
+ if (self.bits.specialProng() != .none) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+ }
+
+ var scalar_i: usize = 0;
+ while (true) : (scalar_i += 1) {
+ const item = @intToEnum(Ref, zir.extra[extra_index]);
+ extra_index += 1;
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ if (scalar_i < prong_index) continue;
+
+ return .{
+ .item = item,
+ .body = body,
+ };
+ }
+ }
};
pub const Field = struct {
@@ -2934,7 +2942,7 @@ pub const Inst = struct {
/// Trailing: for each `imports_len` there is an Item
pub const Imports = struct {
- imports_len: Zir.Inst.Index,
+ imports_len: Inst.Index,
pub const Item = struct {
/// null terminated string index
@@ -3077,7 +3085,7 @@ pub fn declIteratorInner(zir: Zir, extra_index: usize, decls_len: u32) DeclItera
/// The iterator would have to allocate memory anyway to iterate. So here we populate
/// an ArrayList as the result.
-pub fn findDecls(zir: Zir, list: *std.ArrayList(Zir.Inst.Index), decl_sub_index: u32) !void {
+pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_sub_index: u32) !void {
const block_inst = zir.extra[decl_sub_index + 6];
list.clearRetainingCapacity();
@@ -3086,8 +3094,8 @@ pub fn findDecls(zir: Zir, list: *std.ArrayList(Zir.Inst.Index), decl_sub_index:
fn findDeclsInner(
zir: Zir,
- list: *std.ArrayList(Zir.Inst.Index),
- inst: Zir.Inst.Index,
+ list: *std.ArrayList(Inst.Index),
+ inst: Inst.Index,
) Allocator.Error!void {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);
@@ -3148,19 +3156,7 @@ fn findDeclsInner(
try zir.findDeclsBody(list, then_body);
try zir.findDeclsBody(list, else_body);
},
- .switch_block => return findDeclsSwitch(zir, list, inst, .none),
- .switch_block_else => return findDeclsSwitch(zir, list, inst, .@"else"),
- .switch_block_under => return findDeclsSwitch(zir, list, inst, .under),
- .switch_block_ref => return findDeclsSwitch(zir, list, inst, .none),
- .switch_block_ref_else => return findDeclsSwitch(zir, list, inst, .@"else"),
- .switch_block_ref_under => return findDeclsSwitch(zir, list, inst, .under),
-
- .switch_block_multi => return findDeclsSwitchMulti(zir, list, inst, .none),
- .switch_block_else_multi => return findDeclsSwitchMulti(zir, list, inst, .@"else"),
- .switch_block_under_multi => return findDeclsSwitchMulti(zir, list, inst, .under),
- .switch_block_ref_multi => return findDeclsSwitchMulti(zir, list, inst, .none),
- .switch_block_ref_else_multi => return findDeclsSwitchMulti(zir, list, inst, .@"else"),
- .switch_block_ref_under_multi => return findDeclsSwitchMulti(zir, list, inst, .under),
+ .switch_block => return findDeclsSwitch(zir, list, inst),
.suspend_block => @panic("TODO iterate suspend block"),
@@ -3170,71 +3166,34 @@ fn findDeclsInner(
fn findDeclsSwitch(
zir: Zir,
- list: *std.ArrayList(Zir.Inst.Index),
- inst: Zir.Inst.Index,
- special_prong: SpecialProng,
+ list: *std.ArrayList(Inst.Index),
+ inst: Inst.Index,
) Allocator.Error!void {
const inst_data = zir.instructions.items(.data)[inst].pl_node;
const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index);
- const special: struct {
- body: []const Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = zir.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = zir.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
- try zir.findDeclsBody(list, special.body);
+ var extra_index: usize = extra.end;
- var extra_index: usize = special.end;
- var scalar_i: usize = 0;
- while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = zir.extra[extra_index];
extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
+
+ const special_prong = extra.data.bits.specialProng();
+ if (special_prong != .none) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.extra[extra_index..][0..body_len];
- extra_index += body_len;
+ extra_index += body.len;
try zir.findDeclsBody(list, body);
}
-}
-
-fn findDeclsSwitchMulti(
- zir: Zir,
- list: *std.ArrayList(Zir.Inst.Index),
- inst: Zir.Inst.Index,
- special_prong: SpecialProng,
-) Allocator.Error!void {
- const inst_data = zir.instructions.items(.data)[inst].pl_node;
- const extra = zir.extraData(Inst.SwitchBlockMulti, inst_data.payload_index);
- const special: struct {
- body: []const Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = zir.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = zir.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
- try zir.findDeclsBody(list, special.body);
-
- var extra_index: usize = special.end;
{
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
var scalar_i: usize = 0;
- while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
extra_index += 1;
const body_len = zir.extra[extra_index];
extra_index += 1;
@@ -3246,7 +3205,7 @@ fn findDeclsSwitchMulti(
}
{
var multi_i: usize = 0;
- while (multi_i < extra.data.multi_cases_len) : (multi_i += 1) {
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = zir.extra[extra_index];
extra_index += 1;
const ranges_len = zir.extra[extra_index];
@@ -3353,3 +3312,18 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
.total_params_len = total_params_len,
};
}
+
+const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+
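+/// `Inst.Ref` values below `ref_start_index` are the well-known constants of
+/// `typed_value_map`; instruction indices are encoded at and above it.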
+pub fn indexToRef(inst: Inst.Index) Inst.Ref {
+ return @intToEnum(Inst.Ref, ref_start_index + inst);
+}
+
+pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
+ const ref_int = @enumToInt(inst);
+ if (ref_int >= ref_start_index) {
+ return ref_int - ref_start_index;
+ } else {
+ return null;
+ }
+}
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 73ada3a9ca..2c6feec70c 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -410,13 +410,15 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airShl(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
+
+ .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@@ -494,12 +496,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
+ .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst),
+ .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst),
+
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
@@ -871,6 +875,13 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+}
+
fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add for {}", .{self.target.cpu.arch});
@@ -1057,6 +1068,18 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1064,16 +1087,16 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airPtrSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
+fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_elem_val for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1091,13 +1114,6 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airPtrPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_ptr_elem_val for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
_ = bin_op;
diff --git a/src/clang.zig b/src/clang.zig
index 430c9093f2..8a8d794e41 100644
--- a/src/clang.zig
+++ b/src/clang.zig
@@ -536,6 +536,9 @@ pub const FunctionDecl = opaque {
pub const isInlineSpecified = ZigClangFunctionDecl_isInlineSpecified;
extern fn ZigClangFunctionDecl_isInlineSpecified(*const FunctionDecl) bool;
+ pub const hasAlwaysInlineAttr = ZigClangFunctionDecl_hasAlwaysInlineAttr;
+ extern fn ZigClangFunctionDecl_hasAlwaysInlineAttr(*const FunctionDecl) bool;
+
pub const isDefined = ZigClangFunctionDecl_isDefined;
extern fn ZigClangFunctionDecl_isDefined(*const FunctionDecl) bool;
diff --git a/src/codegen.zig b/src/codegen.zig
index 0371f32a8a..5f5ee1b549 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -758,13 +758,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airShl(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
+
+ .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@@ -842,12 +844,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
+ .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst),
+ .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst),
+
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
@@ -1241,6 +1245,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1498,6 +1511,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
+ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1507,19 +1536,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
- fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}),
+ else => return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch}),
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
- fn airPtrSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
+ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement ptr_slice_elem_val for {}", .{self.target.cpu.arch}),
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}),
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1542,15 +1571,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
- fn airPtrPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement ptr_ptr_elem_val for {}", .{self.target.cpu.arch}),
- };
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = switch (arch) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index ad98dc87c1..dd71590566 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -976,7 +976,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul => try airBinOp (f, inst, " * "),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
- .div => try airBinOp( f, inst, " / "),
+ .div_float, .div_exact, .div_trunc => try airBinOp( f, inst, " / "),
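+            // TODO " divfloor " is not a valid C operator; it is a placeholder
+            // until floor division gets a real lowering.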
+ .div_floor => try airBinOp( f, inst, " divfloor "),
.rem => try airBinOp( f, inst, " % "),
.mod => try airBinOp( f, inst, " mod "), // TODO implement modulus division
@@ -992,6 +993,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.min => try airMinMax(f, inst, "<"),
.max => try airMinMax(f, inst, ">"),
+ .slice => try airSlice(f, inst),
+
.cmp_eq => try airBinOp(f, inst, " == "),
.cmp_gt => try airBinOp(f, inst, " > "),
.cmp_gte => try airBinOp(f, inst, " >= "),
@@ -1075,11 +1078,13 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.slice_ptr => try airSliceField(f, inst, ".ptr;\n"),
.slice_len => try airSliceField(f, inst, ".len;\n"),
+ .ptr_slice_len_ptr => try airPtrSliceFieldPtr(f, inst, ".len;\n"),
+ .ptr_slice_ptr_ptr => try airPtrSliceFieldPtr(f, inst, ".ptr;\n"),
+
.ptr_elem_val => try airPtrElemVal(f, inst, "["),
- .ptr_ptr_elem_val => try airPtrElemVal(f, inst, "[0]["),
.ptr_elem_ptr => try airPtrElemPtr(f, inst),
.slice_elem_val => try airSliceElemVal(f, inst, "["),
- .ptr_slice_elem_val => try airSliceElemVal(f, inst, "[0]["),
+ .slice_elem_ptr => try airSliceElemPtr(f, inst),
.array_elem_val => try airArrayElemVal(f, inst),
.unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst),
@@ -1101,8 +1106,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
}
fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
- if (f.liveness.isUnused(inst))
- return CValue.none;
+ if (f.liveness.isUnused(inst)) return CValue.none;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -1114,6 +1118,21 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue
return local;
}
+fn airPtrSliceFieldPtr(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
+ const writer = f.object.writer();
+
+ _ = writer;
+ _ = operand;
+ _ = suffix;
+
+ return f.fail("TODO: C backend: airPtrSliceFieldPtr", .{});
+}
+
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue {
const is_volatile = false; // TODO
if (!is_volatile and f.liveness.isUnused(inst))
@@ -1148,6 +1167,24 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CVal
return local;
}
+fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ const slice = try f.resolveInst(bin_op.lhs);
+ const index = try f.resolveInst(bin_op.rhs);
+ const writer = f.object.writer();
+ const local = try f.allocLocal(f.air.typeOfIndex(inst), .Const);
+ try writer.writeAll(" = &");
+ try f.writeCValue(writer, slice);
+ try writer.writeByte('[');
+ try f.writeCValue(writer, index);
+ try writer.writeAll("];\n");
+ return local;
+}
+
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
@@ -1623,6 +1660,27 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValu
return local;
}
+fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr = try f.resolveInst(bin_op.lhs);
+ const len = try f.resolveInst(bin_op.rhs);
+
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+
+ try writer.writeAll(" = {");
+ try f.writeCValue(writer, ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll("};\n");
+
+ return local;
+}
+
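
Slicing a many-pointer by a runtime length is one producer of the new `slice` instruction; per `airSlice` above, the C backend initializes the two-field slice struct directly from the operands. A sketch with hypothetical names:

    fn makeSlice(ptr: [*]u8, len: usize) []u8 {
        return ptr[0..len]; // AIR `slice`: lhs = ptr, rhs = len; C output is `= {ptr, len};`
    }

    test "slice from pointer and length" {
        var buf = [_]u8{ 'a', 'b', 'c' };
        try @import("std").testing.expectEqual(@as(usize, 2), makeSlice(&buf, 2).len);
    }
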
fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 681b3c36db..80625928cb 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -527,19 +527,11 @@ pub const Object = struct {
if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
alias.setAliasee(llvm_global);
} else {
- const alias = self.llvm_module.addAlias(llvm_global.typeOf(), llvm_global, exp_name_z);
- switch (exp.options.linkage) {
- .Internal => alias.setLinkage(.Internal),
- .Strong => alias.setLinkage(.External),
- .Weak => {
- if (is_extern) {
- alias.setLinkage(.ExternalWeak);
- } else {
- alias.setLinkage(.WeakODR);
- }
- },
- .LinkOnce => alias.setLinkage(.LinkOnceODR),
- }
+ _ = self.llvm_module.addAlias(
+ llvm_global.typeOf(),
+ llvm_global,
+ exp_name_z,
+ );
}
}
} else {
@@ -589,7 +581,9 @@ pub const DeclGen = struct {
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
_ = try self.resolveLlvmFunction(extern_fn.data);
} else {
+ const target = self.module.getTarget();
const global = try self.resolveGlobalDecl(decl);
+ global.setAlignment(decl.getAlignment(target));
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
const variable = payload.data;
@@ -1090,6 +1084,37 @@ pub const DeclGen = struct {
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False);
return llvm_int.constIntToPtr(try self.llvmType(tv.ty));
},
+ .field_ptr => {
+ const field_ptr = tv.val.castTag(.field_ptr).?.data;
+ const parent_ptr = try self.lowerParentPtr(field_ptr.container_ptr);
+ const llvm_u32 = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(field_ptr.field_index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .elem_ptr => {
+ const elem_ptr = tv.val.castTag(.elem_ptr).?.data;
+ const parent_ptr = try self.lowerParentPtr(elem_ptr.array_ptr);
+ const llvm_usize = try self.llvmType(Type.usize);
+ if (parent_ptr.typeOf().getElementType().getTypeKind() == .Array) {
+ const indices: [2]*const llvm.Value = .{
+ llvm_usize.constInt(0, .False),
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ } else {
+ const indices: [1]*const llvm.Value = .{
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ }
+ },
+ .null_value, .zero => {
+ const llvm_type = try self.llvmType(tv.ty);
+ return llvm_type.constNull();
+ },
else => |tag| return self.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
},
.Array => switch (tv.val.tag()) {
@@ -1254,6 +1279,10 @@ pub const DeclGen = struct {
}
const field_ty = tv.ty.unionFieldType(tag_and_val.tag);
const payload = p: {
+ if (!field_ty.hasCodeGenBits()) {
+ const padding_len = @intCast(c_uint, layout.payload_size);
+ break :p self.context.intType(8).arrayType(padding_len).getUndef();
+ }
const field = try genTypedValue(self, .{ .ty = field_ty, .val = tag_and_val.val });
const field_size = field_ty.abiSize(target);
if (field_size == layout.payload_size) {
@@ -1284,6 +1313,66 @@ pub const DeclGen = struct {
}
return llvm_union_ty.constNamedStruct(&fields, fields.len);
},
+ .Vector => switch (tv.val.tag()) {
+ .bytes => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const bytes = tv.val.castTag(.bytes).?.data;
+ const vector_len = tv.ty.arrayLen();
+ assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
+
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ var byte_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = bytes[i],
+ };
+
+ elem.* = try self.genTypedValue(.{
+ .ty = elem_ty,
+ .val = Value.initPayload(&byte_payload.base),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .array => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ // The value includes the sentinel in those cases.
+ const elem_vals = tv.val.castTag(.array).?.data;
+ const vector_len = tv.ty.arrayLen();
+ assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .repeated => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const val = tv.val.castTag(.repeated).?.data;
+ const elem_ty = tv.ty.elemType();
+ const len = tv.ty.arrayLen();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ else => unreachable,
+ },
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
@@ -1298,11 +1387,73 @@ pub const DeclGen = struct {
.Frame,
.AnyFrame,
- .Vector,
=> return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
+ const ParentPtr = struct {
+ ty: Type,
+ llvm_ptr: *const llvm.Value,
+ };
+
+ fn lowerParentPtrDecl(
+ dg: *DeclGen,
+ ptr_val: Value,
+ decl: *Module.Decl,
+ ) Error!ParentPtr {
+ decl.alive = true;
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = decl.ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
+ return ParentPtr{
+ .llvm_ptr = llvm_ptr,
+ .ty = decl.ty,
+ };
+ }
+
+ fn lowerParentPtr(dg: *DeclGen, ptr_val: Value) Error!*const llvm.Value {
+ switch (ptr_val.tag()) {
+ .decl_ref_mut => {
+ const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .decl_ref => {
+ const decl = ptr_val.castTag(.decl_ref).?.data;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .variable => {
+ const decl = ptr_val.castTag(.variable).?.data.owner_decl;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ const parent_ptr = try dg.lowerParentPtr(field_ptr.container_ptr);
+ const llvm_u32 = dg.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(field_ptr.field_index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ const parent_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr);
+ const llvm_usize = try dg.llvmType(Type.usize);
+ const indices: [2]*const llvm.Value = .{
+ llvm_usize.constInt(0, .False),
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .opt_payload_ptr => return dg.todo("implement lowerParentPtr for optional payload", .{}),
+ .eu_payload_ptr => return dg.todo("implement lowerParentPtr for error union payload", .{}),
+ else => unreachable,
+ }
+ }
+
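
The `field_ptr`/`elem_ptr` cases recurse until they bottom out at a decl. For instance (hypothetical decls), `&s.b` below is a `field_ptr` whose container pointer is a `decl_ref`, and it lowers to a constant inbounds GEP off the global:

    const S = struct { a: u32, b: u32 };
    var s: S = .{ .a = 1, .b = 2 };
    const pb: *u32 = &s.b; // comptime-known pointer into the global `s`

    test "comptime field pointer" {
        try @import("std").testing.expectEqual(@as(u32, 2), pb.*);
    }
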
fn lowerDeclRefValue(
self: *DeclGen,
tv: TypedValue,
@@ -1328,12 +1479,13 @@ pub const DeclGen = struct {
return self.context.constStruct(&fields, fields.len, .False);
}
- decl.alive = true;
const llvm_type = try self.llvmType(tv.ty);
if (!tv.ty.childType().hasCodeGenBits()) {
return self.lowerPtrToVoid(tv.ty);
}
+ decl.alive = true;
+
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
try self.resolveLlvmFunction(decl)
else
@@ -1523,6 +1675,7 @@ pub const FuncGen = struct {
global.setInitializer(llvm_val);
global.setLinkage(.Private);
global.setGlobalConstant(.True);
+ global.setUnnamedAddr(.True);
global.setAlignment(ty.abiAlignment(target));
gop.value_ptr.* = global;
return global;
@@ -1542,7 +1695,10 @@ pub const FuncGen = struct {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
+ .div_float => try self.airDivFloat(inst),
+ .div_trunc => try self.airDivTrunc(inst),
+ .div_floor => try self.airDivFloor(inst),
+ .div_exact => try self.airDivExact(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.ptr_add => try self.airPtrAdd(inst),
@@ -1552,6 +1708,7 @@ pub const FuncGen = struct {
.shl_exact => try self.airShlExact(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
.bit_and, .bool_and => try self.airAnd(inst),
.bit_or, .bool_or => try self.airOr(inst),
@@ -1600,6 +1757,10 @@ pub const FuncGen = struct {
.assembly => try self.airAssembly(inst),
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
+
+ .ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0),
+ .ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1),
+
.array_to_slice => try self.airArrayToSlice(inst),
.float_to_int => try self.airFloatToInt(inst),
.int_to_float => try self.airIntToFloat(inst),
@@ -1630,13 +1791,12 @@ pub const FuncGen = struct {
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
- .optional_payload => try self.airOptionalPayload(inst, false),
- .optional_payload_ptr => try self.airOptionalPayload(inst, true),
+ .optional_payload => try self.airOptionalPayload(inst),
+ .optional_payload_ptr => try self.airOptionalPayloadPtr(inst),
.unwrap_errunion_payload => try self.airErrUnionPayload(inst, false),
.unwrap_errunion_payload_ptr => try self.airErrUnionPayload(inst, true),
@@ -1829,14 +1989,16 @@ pub const FuncGen = struct {
const raw_llvm_ty = try self.dg.llvmType(inst_ty);
- // If the zig tag type is a function, this represents an actual function body; not
- // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
- // of function pointers, however the phi makes it a runtime value and therefore
- // the LLVM type has to be wrapped in a pointer.
- const llvm_ty = if (inst_ty.zigTypeTag() == .Fn)
- raw_llvm_ty.pointerType(0)
- else
- raw_llvm_ty;
+ const llvm_ty = ty: {
+ // If the zig tag type is a function, this represents an actual function body; not
+ // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
+ // of function pointers, however the phi makes it a runtime value and therefore
+ // the LLVM type has to be wrapped in a pointer.
+ if (inst_ty.zigTypeTag() == .Fn or isByRef(inst_ty)) {
+ break :ty raw_llvm_ty.pointerType(0);
+ }
+ break :ty raw_llvm_ty;
+ };
const phi_node = self.builder.buildPhi(llvm_ty, "");
phi_node.addIncoming(
@@ -1874,23 +2036,55 @@ pub const FuncGen = struct {
const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- {
- const prev_block = self.builder.getInsertBlock();
- defer self.builder.positionBuilderAtEnd(prev_block);
+ _ = self.builder.buildCondBr(cond, then_block, else_block);
- self.builder.positionBuilderAtEnd(then_block);
- try self.genBody(then_body);
+ self.builder.positionBuilderAtEnd(then_block);
+ try self.genBody(then_body);
- self.builder.positionBuilderAtEnd(else_block);
- try self.genBody(else_body);
- }
- _ = self.builder.buildCondBr(cond, then_block, else_block);
+ self.builder.positionBuilderAtEnd(else_block);
+ try self.genBody(else_body);
+
+ // No need to reset the insert cursor since this instruction is noreturn.
return null;
}
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- _ = inst;
- return self.todo("implement llvm codegen for switch_br", .{});
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond = try self.resolveInst(pl_op.operand);
+ const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
+ const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
+ const llvm_switch = self.builder.buildSwitch(cond, else_block, switch_br.data.cases_len);
+
+ var extra_index: usize = switch_br.end;
+ var case_i: u32 = 0;
+
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + case.data.items_len + case_body.len;
+
+ const case_block = self.context.appendBasicBlock(self.llvm_func, "Case");
+
+ for (items) |item| {
+ const llvm_item = try self.resolveInst(item);
+ llvm_switch.addCase(llvm_item, case_block);
+ }
+
+ self.builder.positionBuilderAtEnd(case_block);
+ try self.genBody(case_body);
+ }
+
+ self.builder.positionBuilderAtEnd(else_block);
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
+ if (else_body.len != 0) {
+ try self.genBody(else_body);
+ } else {
+ _ = self.builder.buildUnreachable();
+ }
+
+ // No need to reset the insert cursor since this instruction is noreturn.
+ return null;
}
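
As a hypothetical input: each item in a multi-item prong gets its own `addCase` call targeting the same case block, and the `else` prong (or `buildUnreachable` when there is none) becomes the switch's default block:

    export fn classify(x: u8) u8 {
        return switch (x) {
            0 => 10,
            1, 2 => 20, // two items, one case block, two addCase calls
            else => 30,
        };
    }
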
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1972,14 +2166,22 @@ pub const FuncGen = struct {
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return self.builder.buildExtractValue(operand, index, "");
}
+ fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const slice_ptr = try self.resolveInst(ty_op.operand);
+
+ return self.builder.buildStructGEP(slice_ptr, index, "");
+ }
+
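
Taking the address of a slice field through a pointer to the slice is what reaches this function; a sketch, assuming field pointers into slices behave as shown:

    test "pointer to a slice's len field" {
        var buf = [_]u8{ 1, 2, 3 };
        var s: []u8 = buf[0..];
        const len_ptr = &s.len; // ptr_slice_len_ptr: struct GEP index 1 on the slice pointer
        len_ptr.* = 2;
        try @import("std").testing.expectEqual(@as(usize, 2), s.len);
    }
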
fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -1987,28 +2189,18 @@ pub const FuncGen = struct {
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*const llvm.Value = .{index};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr = self.sliceElemPtr(slice, index);
return self.load(ptr, slice_ty);
}
- fn airPtrSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs).childType();
- if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
-
- const base_ptr = ptr: {
- const ptr_field_ptr = self.builder.buildStructGEP(lhs, 0, "");
- break :ptr self.builder.buildLoad(ptr_field_ptr, "");
- };
+ fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
- return self.load(ptr, slice_ty);
+ const slice = try self.resolveInst(bin_op.lhs);
+ const index = try self.resolveInst(bin_op.rhs);
+ return self.sliceElemPtr(slice, index);
}
fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2068,19 +2260,6 @@ pub const FuncGen = struct {
}
}
- fn airPtrPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs).childType();
- if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
- const base_ptr = self.builder.buildLoad(lhs, "");
- const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
- return self.load(ptr, ptr_ty);
- }
-
fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -2112,17 +2291,34 @@ pub const FuncGen = struct {
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.air.typeOf(struct_field.struct_operand);
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
- const field_index = llvmFieldIndex(struct_ty, struct_field.field_index);
- if (isByRef(struct_ty)) {
- const field_ptr = self.builder.buildStructGEP(struct_llvm_val, field_index, "");
- const field_ty = struct_ty.structFieldType(struct_field.field_index);
- if (isByRef(field_ty)) {
- return field_ptr;
- } else {
- return self.builder.buildLoad(field_ptr, "");
- }
+ const field_index = struct_field.field_index;
+ const field_ty = struct_ty.structFieldType(field_index);
+ if (!field_ty.hasCodeGenBits()) {
+ return null;
+ }
+
+ assert(isByRef(struct_ty));
+
+ const field_ptr = switch (struct_ty.zigTypeTag()) {
+ .Struct => blk: {
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index);
+ break :blk self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
+ },
+ .Union => blk: {
+ const llvm_field_ty = try self.dg.llvmType(field_ty);
+ const target = self.dg.module.getTarget();
+ const layout = struct_ty.unionGetLayout(target);
+ const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
+ const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, "");
+ break :blk self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
+ },
+ else => unreachable,
+ };
+
+ if (isByRef(field_ty)) {
+ return field_ptr;
} else {
- return self.builder.buildExtractValue(struct_llvm_val, field_index, "");
+ return self.builder.buildLoad(field_ptr, "");
}
}
@@ -2154,17 +2350,19 @@ pub const FuncGen = struct {
const air_asm = self.air.extraData(Air.Asm, ty_pl.payload);
const zir = self.dg.decl.getFileScope().zir;
const extended = zir.instructions.items(.data)[air_asm.data.zir_index].extended;
- const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
- const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
- const outputs_len = @truncate(u5, extended.small);
- const args_len = @truncate(u5, extended.small >> 5);
- const clobbers_len = @truncate(u5, extended.small >> 10);
const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end..][0..outputs_len]);
- const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end + outputs.len ..][0..args_len]);
+ if (!is_volatile and self.liveness.isUnused(inst)) {
+ return null;
+ }
+ const outputs_len = @truncate(u5, extended.small);
if (outputs_len > 1) {
return self.todo("implement llvm codegen for asm with more than 1 output", .{});
}
+ const args_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
+ const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
+ const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end..][0..args_len]);
var extra_i: usize = zir_extra.end;
const output_constraint: ?[]const u8 = out: {
@@ -2177,10 +2375,6 @@ pub const FuncGen = struct {
break :out null;
};
- if (!is_volatile and self.liveness.isUnused(inst)) {
- return null;
- }
-
var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
defer llvm_constraints.deinit(self.gpa);
@@ -2188,7 +2382,7 @@ pub const FuncGen = struct {
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
- const llvm_params_len = args.len + @boolToInt(output_constraint != null);
+ const llvm_params_len = args.len;
const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
const llvm_param_values = try arena.alloc(*const llvm.Value, llvm_params_len);
@@ -2200,7 +2394,8 @@ pub const FuncGen = struct {
if (total_i != 0) {
llvm_constraints.appendAssumeCapacity(',');
}
- llvm_constraints.appendSliceAssumeCapacity(constraint);
+ llvm_constraints.appendAssumeCapacity('=');
+ llvm_constraints.appendSliceAssumeCapacity(constraint[1..]);
total_i += 1;
}
@@ -2326,8 +2521,7 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
@@ -2341,7 +2535,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- if (operand_is_ptr) {
+ if (operand_is_ptr or isByRef(err_union_ty)) {
const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
const loaded = self.builder.buildLoad(err_field_ptr, "");
return self.builder.buildICmp(op, loaded, zero, "");
@@ -2351,35 +2545,55 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- fn airOptionalPayload(
- self: *FuncGen,
- inst: Air.Inst.Index,
- operand_is_ptr: bool,
- ) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
+ const optional_ty = self.air.typeOf(ty_op.operand).childType();
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = optional_ty.optionalChild(&buf);
+ if (!payload_ty.hasCodeGenBits()) {
+ // We have a pointer to a zero-bit value and we need to return
+ // a pointer to a zero-bit value.
+ return operand;
+ }
+ if (optional_ty.isPtrLikeOptional()) {
+ // The payload and the optional are the same value.
+ return operand;
+ }
+ const index_type = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ index_type.constNull(), // dereference the pointer
+ index_type.constNull(), // first field is the payload
+ };
+ return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
+ }
- if (operand_is_ptr) {
- const operand_ty = self.air.typeOf(ty_op.operand).elemType();
- if (operand_ty.isPtrLikeOptional()) {
- return self.builder.buildLoad(operand, "");
- }
+ fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
- const index_type = self.context.intType(32);
- var indices: [2]*const llvm.Value = .{
- index_type.constNull(), index_type.constNull(),
- };
- return self.builder.buildInBoundsGEP(operand, &indices, 2, "");
- }
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const optional_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.air.typeOfIndex(inst);
+ if (!payload_ty.hasCodeGenBits()) return null;
- const operand_ty = self.air.typeOf(ty_op.operand);
- if (operand_ty.isPtrLikeOptional()) {
+ if (optional_ty.isPtrLikeOptional()) {
+ // Payload value is the same as the optional value.
return operand;
}
+ if (isByRef(payload_ty)) {
+ // We have a pointer and we need to return a pointer to the first field.
+ const index_type = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ index_type.constNull(), // dereference the pointer
+ index_type.constNull(), // first field is the payload
+ };
+ return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
+ }
+
return self.builder.buildExtractValue(operand, 0, "");
}
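
A pointer capture is one way to reach `optional_payload_ptr` at the source level; a minimal sketch:

    test "optional payload pointer" {
        var opt: ?u32 = 5;
        // |*payload| is a pointer into the optional's payload
        // (a GEP to field 0 for non-pointer-like optionals, per the code above).
        if (opt) |*payload| payload.* = 7;
        try @import("std").testing.expectEqual(@as(?u32, 7), opt);
    }
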
@@ -2388,22 +2602,16 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
-
- if (!payload_ty.hasCodeGenBits()) {
- return null;
- }
-
- if (operand_is_ptr) {
+ if (!payload_ty.hasCodeGenBits()) return null;
+ if (operand_is_ptr or isByRef(payload_ty)) {
return self.builder.buildStructGEP(operand, 1, "");
}
-
return self.builder.buildExtractValue(operand, 1, "");
}
@@ -2425,7 +2633,7 @@ pub const FuncGen = struct {
return self.builder.buildLoad(operand, "");
}
- if (operand_is_ptr) {
+ if (operand_is_ptr or isByRef(payload_ty)) {
const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
return self.builder.buildLoad(err_field_ptr, "");
}
@@ -2437,9 +2645,9 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(1).constAllOnes();
- if (!operand_ty.hasCodeGenBits()) return non_null_bit;
+ if (!payload_ty.hasCodeGenBits()) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
if (optional_ty.isPtrLikeOptional()) return operand;
@@ -2447,8 +2655,6 @@ pub const FuncGen = struct {
if (isByRef(optional_ty)) {
const optional_ptr = self.buildAlloca(llvm_optional_ty);
const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, "");
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = operand_ty.optionalChild(&buf);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = payload_ty,
@@ -2494,10 +2700,35 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const err_un_ty = self.air.typeOfIndex(inst);
+ const payload_ty = err_un_ty.errorUnionPayload();
+ const operand = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasCodeGenBits()) {
+ return operand;
+ }
+ const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
+ if (isByRef(err_un_ty)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty);
+ const err_ptr = self.builder.buildStructGEP(result_ptr, 0, "");
+ _ = self.builder.buildStore(operand, err_ptr);
+ const payload_ptr = self.builder.buildStructGEP(result_ptr, 1, "");
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = payload_ty,
+ };
+ const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ // TODO store undef to payload_ptr
+ _ = payload_ptr;
+ _ = payload_ptr_ty;
+ return result_ptr;
+ }
- return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{});
+ const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, 0, "");
+ // TODO set payload bytes to undef
+ return partial;
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2526,6 +2757,20 @@ pub const FuncGen = struct {
return self.builder.buildUMax(lhs, rhs, "");
}
+ fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr = try self.resolveInst(bin_op.lhs);
+ const len = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+ const llvm_slice_ty = try self.dg.llvmType(inst_ty);
+
+ const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, "");
+ return self.builder.buildInsertValue(partial, len, 1, "");
+ }
+
fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -2635,20 +2880,76 @@ pub const FuncGen = struct {
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
- fn airDiv(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ return self.builder.buildFDiv(lhs, rhs, "");
+ }
+
+ fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isRuntimeFloat()) {
+ const result = self.builder.buildFDiv(lhs, rhs, "");
+ return self.callTrunc(result, inst_ty);
+ }
if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
+ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isRuntimeFloat()) {
+ const result = self.builder.buildFDiv(lhs, rhs, "");
+ return try self.callFloor(result, inst_ty);
+ }
+ if (inst_ty.isSignedInt()) {
+ // const d = @divTrunc(a, b);
+ // const r = @rem(a, b);
+ // return if (r == 0) d else d - ((a < 0) ^ (b < 0));
+ const result_llvm_ty = try self.dg.llvmType(inst_ty);
+ const zero = result_llvm_ty.constNull();
+ const div_trunc = self.builder.buildSDiv(lhs, rhs, "");
+ const rem = self.builder.buildSRem(lhs, rhs, "");
+ const rem_eq_0 = self.builder.buildICmp(.EQ, rem, zero, "");
+ const a_lt_0 = self.builder.buildICmp(.SLT, lhs, zero, "");
+ const b_lt_0 = self.builder.buildICmp(.SLT, rhs, zero, "");
+ const a_b_xor = self.builder.buildXor(a_lt_0, b_lt_0, "");
+ const a_b_xor_ext = self.builder.buildZExt(a_b_xor, div_trunc.typeOf(), "");
+ const d_sub_xor = self.builder.buildSub(div_trunc, a_b_xor_ext, "");
+ return self.builder.buildSelect(rem_eq_0, div_trunc, d_sub_xor, "");
+ }
+ return self.builder.buildUDiv(lhs, rhs, "");
+ }
+
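
A quick check of the identity encoded above (`d - ((a < 0) ^ (b < 0))` when the remainder is nonzero), as a plain Zig test:

    const std = @import("std");

    test "divFloor identity used by the LLVM lowering" {
        // d = @divTrunc(-7, 2) = -3, r = @rem(-7, 2) = -1 != 0,
        // and the signs differ, so @divFloor is d - 1 = -4.
        try std.testing.expectEqual(@as(i32, -3), @divTrunc(@as(i32, -7), 2));
        try std.testing.expectEqual(@as(i32, -4), @divFloor(@as(i32, -7), 2));
        // When the remainder is zero, the two divisions agree.
        try std.testing.expectEqual(@as(i32, -3), @divFloor(@as(i32, -6), 2));
    }
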
+ fn airDivExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+ return self.builder.buildExactUDiv(lhs, rhs, "");
+ }
+
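
`@divExact` promises no remainder, which is exactly what LLVM's `exact` division flag expresses; a nonzero remainder would be undefined behavior. A sketch:

    export fn quarter(x: u32) u32 {
        return @divExact(x, 4); // lowered to exact udiv; caller guarantees x % 4 == 0
    }
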
fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -2691,26 +2992,42 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
- const indices: [1]*const llvm.Value = .{offset};
- return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ if (ptr_ty.ptrSize() == .One) {
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ const indices: [2]*const llvm.Value = .{
+ self.context.intType(32).constNull(), offset,
+ };
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ } else {
+ const indices: [1]*const llvm.Value = .{offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");
- const indices: [1]*const llvm.Value = .{negative_offset};
- return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ if (ptr_ty.ptrSize() == .One) {
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ const indices: [2]*const llvm.Value = .{
+ self.context.intType(32).constNull(), negative_offset,
+ };
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ } else {
+ const indices: [1]*const llvm.Value = .{negative_offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
}
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2883,8 +3200,9 @@ pub const FuncGen = struct {
const inst_ty = self.air.typeOfIndex(inst);
const llvm_dest_ty = try self.dg.llvmType(inst_ty);
- // TODO look into pulling this logic out into a different AIR instruction than bitcast
- if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
+ if (operand_ty.zigTypeTag() == .Int and inst_ty.zigTypeTag() == .Pointer) {
+ return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
+ } else if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
const target = self.dg.module.getTarget();
const elem_ty = operand_ty.childType();
if (!isByRef(inst_ty)) {
@@ -2914,6 +3232,43 @@ pub const FuncGen = struct {
}
}
return array_ptr;
+ } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
+ const target = self.dg.module.getTarget();
+ const elem_ty = operand_ty.childType();
+ const llvm_vector_ty = try self.dg.llvmType(inst_ty);
+ if (!isByRef(operand_ty)) {
+ return self.dg.todo("implement bitcast non-ref array to vector", .{});
+ }
+
+ const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ if (bitcast_ok) {
+ const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0);
+ const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, "");
+ const vector = self.builder.buildLoad(casted_ptr, "");
+ // The array is aligned to the element's alignment, while the vector might have a completely
+ // different alignment. This means we need to enforce the alignment of this load.
+ vector.setAlignment(elem_ty.abiAlignment(target));
+ return vector;
+ } else {
+ // If the element type's size in bits does not match its ABI size in bits,
+ // a simple bitcast will not work, and we fall back to loading each element
+ // and building the vector with insertelement.
+ const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_u32 = self.context.intType(32);
+ const zero = llvm_usize.constNull();
+ const vector_len = operand_ty.arrayLen();
+ var vector = llvm_vector_ty.getUndef();
+ var i: u64 = 0;
+ while (i < vector_len) : (i += 1) {
+ const index_usize = llvm_usize.constInt(i, .False);
+ const index_u32 = llvm_u32.constInt(i, .False);
+ const indexes: [2]*const llvm.Value = .{ zero, index_usize };
+ const elem_ptr = self.builder.buildInBoundsGEP(operand, &indexes, indexes.len, "");
+ const elem = self.builder.buildLoad(elem_ptr, "");
+ vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
+ }
+
+ return vector;
+ }
}
return self.builder.buildBitCast(operand, llvm_dest_ty, "");
@@ -3298,6 +3653,37 @@ pub const FuncGen = struct {
}
}
+ fn callFloor(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "floor");
+ }
+
+ fn callCeil(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "ceil");
+ }
+
+ fn callTrunc(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "trunc");
+ }
+
+ fn callFloatUnary(self: *FuncGen, arg: *const llvm.Value, ty: Type, name: []const u8) !*const llvm.Value {
+ const target = self.dg.module.getTarget();
+
+ var fn_name_buf: [100]u8 = undefined;
+ const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.f{d}", .{
+ name, ty.floatBits(target),
+ }) catch unreachable;
+
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const operand_llvm_ty = try self.dg.llvmType(ty);
+ const param_types = [_]*const llvm.Type{operand_llvm_ty};
+ const fn_type = llvm.functionType(operand_llvm_ty, &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+
+ const args: [1]*const llvm.Value = .{arg};
+ return self.builder.buildCall(llvm_fn, &args, args.len, .C, .Auto, "");
+ }
+
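
For example, a runtime float `@divFloor` reaches `callFloor` via `airDivTrunc`/`airDivFloor`; with an `f64` operand the formatted intrinsic name is `llvm.floor.f64`:

    export fn floorDiv(a: f64, b: f64) f64 {
        return @divFloor(a, b); // fdiv, then a call to llvm.floor.f64
    }
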
fn fieldPtr(
self: *FuncGen,
inst: Air.Inst.Index,
@@ -3336,6 +3722,16 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
}
+ fn sliceElemPtr(
+ self: *FuncGen,
+ slice: *const llvm.Value,
+ index: *const llvm.Value,
+ ) *const llvm.Value {
+ const base_ptr = self.builder.buildExtractValue(slice, 0, "");
+ const indices: [1]*const llvm.Value = .{index};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
+
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index ab4cf97350..43aca87532 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -181,6 +181,9 @@ pub const Value = opaque {
pub const setInitializer = LLVMSetInitializer;
extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
+
+ pub const addCase = LLVMAddCase;
+ extern fn LLVMAddCase(Switch: *const Value, OnVal: *const Value, Dest: *const BasicBlock) void;
};
pub const Type = opaque {
@@ -234,6 +237,9 @@ pub const Type = opaque {
pub const getTypeKind = LLVMGetTypeKind;
extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
+
+ pub const getElementType = LLVMGetElementType;
+ extern fn LLVMGetElementType(Ty: *const Type) *const Type;
};
pub const Module = opaque {
@@ -316,6 +322,12 @@ pub const VerifierFailureAction = enum(c_int) {
pub const constNeg = LLVMConstNeg;
extern fn LLVMConstNeg(ConstantVal: *const Value) *const Value;
+pub const constVector = LLVMConstVector;
+extern fn LLVMConstVector(
+ ScalarConstantVals: [*]*const Value,
+ Size: c_uint,
+) *const Value;
+
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
@@ -551,6 +563,9 @@ pub const Builder = opaque {
pub const buildCondBr = LLVMBuildCondBr;
extern fn LLVMBuildCondBr(*const Builder, If: *const Value, Then: *const BasicBlock, Else: *const BasicBlock) *const Value;
+ pub const buildSwitch = LLVMBuildSwitch;
+ extern fn LLVMBuildSwitch(*const Builder, V: *const Value, Else: *const BasicBlock, NumCases: c_uint) *const Value;
+
pub const buildPhi = LLVMBuildPhi;
extern fn LLVMBuildPhi(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
@@ -570,6 +585,15 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
+ pub const buildInsertElement = LLVMBuildInsertElement;
+ extern fn LLVMBuildInsertElement(
+ *const Builder,
+ VecVal: *const Value,
+ EltVal: *const Value,
+ Index: *const Value,
+ Name: [*:0]const u8,
+ ) *const Value;
+
pub const buildPtrToInt = LLVMBuildPtrToInt;
extern fn LLVMBuildPtrToInt(
*const Builder,
@@ -735,6 +759,12 @@ pub const Builder = opaque {
pub const buildSMin = ZigLLVMBuildSMin;
extern fn ZigLLVMBuildSMin(builder: *const Builder, LHS: *const Value, RHS: *const Value, name: [*:0]const u8) *const Value;
+
+ pub const buildExactUDiv = LLVMBuildExactUDiv;
+ extern fn LLVMBuildExactUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildExactSDiv = LLVMBuildExactSDiv;
+ extern fn LLVMBuildExactSDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
};
pub const IntPredicate = enum(c_uint) {
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 25a1d228e0..da2fa66fee 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -669,7 +669,6 @@ pub const DeclGen = struct {
.add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
.sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
.mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
- .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
.bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
.bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index 6902553257..75e6a1d78e 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -822,7 +822,7 @@ pub const Context = struct {
.subwrap => self.airWrapBinOp(inst, .sub),
.mul => self.airBinOp(inst, .mul),
.mulwrap => self.airWrapBinOp(inst, .mul),
- .div => self.airBinOp(inst, .div),
+ .div_trunc => self.airBinOp(inst, .div),
.bit_and => self.airBinOp(inst, .@"and"),
.bit_or => self.airBinOp(inst, .@"or"),
.bool_and => self.airBinOp(inst, .@"and"),
@@ -866,6 +866,7 @@ pub const Context = struct {
.struct_field_ptr_index_1 => self.airStructFieldPtrIndex(inst, 1),
.struct_field_ptr_index_2 => self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => self.airStructFieldPtrIndex(inst, 3),
+ .struct_field_val => self.airStructFieldVal(inst),
.switch_br => self.airSwitchBr(inst),
.unreach => self.airUnreachable(inst),
.wrap_optional => self.airWrapOptional(inst),
@@ -1456,6 +1457,15 @@ pub const Context = struct {
return WValue{ .local = struct_ptr.multi_value.index + index };
}
+ fn airStructFieldVal(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue.none;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_multivalue = self.resolveInst(extra.struct_operand).multi_value;
+ return WValue{ .local = struct_multivalue.index + extra.field_index };
+ }
+
fn airSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
diff --git a/src/config.zig.in b/src/config.zig.in
index 62e8785ccb..f193fddb20 100644
--- a/src/config.zig.in
+++ b/src/config.zig.in
@@ -6,6 +6,7 @@ pub const llvm_has_arc = false;
pub const version: [:0]const u8 = "@ZIG_VERSION@";
pub const semver = @import("std").SemanticVersion.parse(version) catch unreachable;
pub const enable_logging: bool = @ZIG_ENABLE_LOGGING_BOOL@;
+pub const enable_link_snapshots: bool = false;
pub const enable_tracy = false;
pub const is_stage1 = true;
pub const skip_non_native = false;
diff --git a/src/link.zig b/src/link.zig
index 675c218d68..a1df48f759 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -127,6 +127,9 @@ pub const Options = struct {
/// WASI-only. Type of WASI execution model ("command" or "reactor").
wasi_exec_model: std.builtin.WasiExecModel = undefined,
+ /// (Zig compiler development) Enable dumping of linker's state as JSON.
+ enable_link_snapshots: bool = false,
+
pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
return if (options.use_lld) .Obj else options.output_mode;
}
@@ -193,12 +196,16 @@ pub const File = struct {
/// rewriting it. A malicious file is detected as incremental link failure
/// and does not cause Illegal Behavior. This operation is not atomic.
pub fn openPath(allocator: *Allocator, options: Options) !*File {
+ if (options.object_format == .macho) {
+ return &(try MachO.openPath(allocator, options)).base;
+ }
+
const use_stage1 = build_options.is_stage1 and options.use_stage1;
if (use_stage1 or options.emit == null) {
return switch (options.object_format) {
.coff => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
- .macho => &(try MachO.createEmpty(allocator, options)).base,
+ .macho => unreachable,
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.plan9 => return &(try Plan9.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
@@ -216,7 +223,7 @@ pub const File = struct {
return switch (options.object_format) {
.coff => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
- .macho => &(try MachO.createEmpty(allocator, options)).base,
+ .macho => unreachable,
.plan9 => &(try Plan9.createEmpty(allocator, options)).base,
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
@@ -236,7 +243,7 @@ pub const File = struct {
const file: *File = switch (options.object_format) {
.coff => &(try Coff.openPath(allocator, sub_path, options)).base,
.elf => &(try Elf.openPath(allocator, sub_path, options)).base,
- .macho => &(try MachO.openPath(allocator, sub_path, options)).base,
+ .macho => unreachable,
.plan9 => &(try Plan9.openPath(allocator, sub_path, options)).base,
.wasm => &(try Wasm.openPath(allocator, sub_path, options)).base,
.c => &(try C.openPath(allocator, sub_path, options)).base,
@@ -577,7 +584,11 @@ pub const File = struct {
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
- try base.flushModule(comp);
+ if (base.options.object_format == .macho) {
+ try base.cast(MachO).?.flushObject(comp);
+ } else {
+ try base.flushModule(comp);
+ }
const obj_basename = base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
diff --git a/src/link/C/zig.h b/src/link/C/zig.h
index 72868e4400..32612fcc41 100644
--- a/src/link/C/zig.h
+++ b/src/link/C/zig.h
@@ -419,7 +419,7 @@ zig_mul_sat_u(u32, uint32_t, uint64_t)
zig_mul_sat_s(i32, int32_t, int64_t)
zig_mul_sat_u(u64, uint64_t, uint128_t)
zig_mul_sat_s(i64, int64_t, int128_t)
-zig_mul_sat_s(isize, intptr_t, int128_t)
+zig_mul_sat_s(isize, intptr_t, int128_t)
zig_mul_sat_s(short, short, int)
zig_mul_sat_s(int, int, long)
zig_mul_sat_s(long, long, long long)
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index fd009ca9f8..a371efaa8a 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1263,6 +1263,10 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
if (self.base.options.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
try argv.append(comp.libcxx_static_lib.?.full_object_path);
+ }
+
+ // libunwind dep
+ if (self.base.options.link_libunwind) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 0d8487ad79..2490ec9124 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -275,18 +275,15 @@ pub const SrcFn = struct {
};
};
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO {
+pub fn openPath(allocator: *Allocator, options: link.Options) !*MachO {
assert(options.object_format == .macho);
- if (build_options.have_llvm and options.use_llvm) {
- const self = try createEmpty(allocator, options);
- errdefer self.base.destroy();
-
- self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
- return self;
+ const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ if (use_stage1 or options.emit == null) {
+ return createEmpty(allocator, options);
}
-
- const file = try options.emit.?.directory.handle.createFile(sub_path, .{
+ const emit = options.emit.?;
+ const file = try emit.directory.handle.createFile(emit.sub_path, .{
.truncate = false,
.read = true,
.mode = link.determineMode(options),
@@ -301,7 +298,20 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
self.base.file = file;
- if (options.output_mode == .Lib and options.link_mode == .Static) {
+ if (build_options.have_llvm and options.use_llvm and options.module != null) {
+ // TODO this intermediary_basename isn't enough; in the case of `zig build-exe`,
+ // we also want to put the intermediary object file in the cache while the
+ // main emit directory is the cwd.
+ const sub_path = try std.fmt.allocPrint(allocator, "{s}{s}", .{
+ emit.sub_path, options.object_format.fileExt(options.target.cpu.arch),
+ });
+ self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
+ self.base.intermediary_basename = sub_path;
+ }
+
+ if (options.output_mode == .Lib and
+ options.link_mode == .Static and self.base.intermediary_basename != null)
+ {
return self;
}
@@ -384,16 +394,22 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
return error.TODOImplementWritingStaticLibFiles;
}
}
+ try self.flushModule(comp);
+}
+pub fn flushModule(self: *MachO, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
+ const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
+ if (!use_stage1 and self.base.options.output_mode == .Obj)
+ return self.flushObject(comp);
+
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
- const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
@@ -410,7 +426,7 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
}
const obj_basename = self.base.intermediary_basename orelse break :blk null;
- try self.flushModule(comp);
+ try self.flushObject(comp);
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
} else null;
@@ -534,15 +550,16 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
.read = true,
.mode = link.determineMode(self.base.options),
});
- try self.populateMissingMetadata();
+ // Index 0 is always a null symbol.
try self.locals.append(self.base.allocator, .{
.n_strx = 0,
- .n_type = macho.N_UNDF,
+ .n_type = 0,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
try self.strtab.append(self.base.allocator, 0);
+ try self.populateMissingMetadata();
}
if (needs_full_relink) {
@@ -887,7 +904,45 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
sect.offset = 0;
}
- try self.flushModule(comp);
+ try self.setEntryPoint();
+ try self.updateSectionOrdinals();
+ try self.writeLinkeditSegment();
+
+ if (self.d_sym) |*ds| {
+ // Flush debug symbols bundle.
+ try ds.flushModule(self.base.allocator, self.base.options);
+ }
+
+ if (self.requires_adhoc_codesig) {
+ // Preallocate space for the code signature.
+ // We need to do this at this stage so that we have the load commands with proper values
+ // written out to the file.
+ // Most important is having the correct vmsize and filesize of the __LINKEDIT segment,
+ // into which the code signature is written.
+ try self.writeCodeSignaturePadding();
+ }
+
+ try self.writeLoadCommands();
+ try self.writeHeader();
+
+ if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
+ log.debug("flushing. no_entry_point_found = true", .{});
+ self.error_flags.no_entry_point_found = true;
+ } else {
+ log.debug("flushing. no_entry_point_found = false", .{});
+ self.error_flags.no_entry_point_found = false;
+ }
+
+ assert(!self.load_commands_dirty);
+
+ if (self.requires_adhoc_codesig) {
+ try self.writeCodeSignature(); // code signing always comes last
+ }
+
+ if (build_options.enable_link_snapshots) {
+ if (self.base.options.enable_link_snapshots)
+ try self.snapshotState();
+ }
}
cache: {
@@ -909,46 +964,14 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
self.cold_start = false;
}
-pub fn flushModule(self: *MachO, comp: *Compilation) !void {
- _ = comp;
-
+pub fn flushObject(self: *MachO, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
- try self.setEntryPoint();
- try self.updateSectionOrdinals();
- try self.writeLinkeditSegment();
-
- if (self.d_sym) |*ds| {
- // Flush debug symbols bundle.
- try ds.flushModule(self.base.allocator, self.base.options);
- }
+ if (build_options.have_llvm)
+ if (self.llvm_object) |llvm_object| return llvm_object.flushModule(comp);
- if (self.requires_adhoc_codesig) {
- // Preallocate space for the code signature.
- // We need to do this at this stage so that we have the load commands with proper values
- // written out to the file.
- // The most important here is to have the correct vm and filesize of the __LINKEDIT segment
- // where the code signature goes into.
- try self.writeCodeSignaturePadding();
- }
-
- try self.writeLoadCommands();
- try self.writeHeader();
-
- if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
- log.debug("flushing. no_entry_point_found = true", .{});
- self.error_flags.no_entry_point_found = true;
- } else {
- log.debug("flushing. no_entry_point_found = false", .{});
- self.error_flags.no_entry_point_found = false;
- }
-
- assert(!self.load_commands_dirty);
-
- if (self.requires_adhoc_codesig) {
- try self.writeCodeSignature(); // code signing always comes last
- }
+ return error.TODOImplementWritingObjFiles;
}
fn resolveSearchDir(
@@ -2288,7 +2311,7 @@ fn createDsoHandleAtom(self: *MachO) !void {
nlist.n_desc = macho.N_WEAK_DEF;
try self.globals.append(self.base.allocator, nlist);
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ assert(self.unresolved.swapRemove(resolv.where_index));
undef.* = .{
.n_strx = 0,
@@ -2386,7 +2409,7 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
const global = &self.globals.items[resolv.where_index];
if (symbolIsTentative(global.*)) {
- _ = self.tentatives.fetchSwapRemove(resolv.where_index);
+ assert(self.tentatives.swapRemove(resolv.where_index));
} else if (!(symbolIsWeakDef(sym) or symbolIsPext(sym)) and
!(symbolIsWeakDef(global.*) or symbolIsPext(global.*)))
{
@@ -2406,7 +2429,15 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
continue;
},
.undef => {
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ const undef = &self.undefs.items[resolv.where_index];
+ undef.* = .{
+ .n_strx = 0,
+ .n_type = macho.N_UNDF,
+ .n_sect = 0,
+ .n_desc = 0,
+ .n_value = 0,
+ };
+ assert(self.unresolved.swapRemove(resolv.where_index));
},
}
@@ -2465,6 +2496,8 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
.n_value = sym.n_value,
});
_ = try self.tentatives.getOrPut(self.base.allocator, global_sym_index);
+ assert(self.unresolved.swapRemove(resolv.where_index));
+
resolv.* = .{
.where = .global,
.where_index = global_sym_index,
@@ -2477,7 +2510,6 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
.n_desc = 0,
.n_value = 0,
};
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
},
}
} else {
@@ -3035,6 +3067,7 @@ fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64, match
}
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
+ if (self.llvm_object) |_| return;
if (decl.link.macho.local_sym_index != 0) return;
try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
@@ -3380,7 +3413,7 @@ pub fn updateDeclExports(
const sym = &self.globals.items[resolv.where_index];
if (symbolIsTentative(sym.*)) {
- _ = self.tentatives.fetchSwapRemove(resolv.where_index);
+ assert(self.tentatives.swapRemove(resolv.where_index));
} else if (!is_weak and !(symbolIsWeakDef(sym.*) or symbolIsPext(sym.*))) {
_ = try module.failed_exports.put(
module.gpa,
@@ -3406,7 +3439,7 @@ pub fn updateDeclExports(
continue;
},
.undef => {
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ assert(self.unresolved.swapRemove(resolv.where_index));
_ = self.symbol_resolver.remove(n_strx);
},
}
@@ -3458,6 +3491,7 @@ pub fn updateDeclExports(
}
pub fn deleteExport(self: *MachO, exp: Export) void {
+ if (self.llvm_object) |_| return;
const sym_index = exp.sym_index orelse return;
self.globals_free_list.append(self.base.allocator, sym_index) catch {};
const global = &self.globals.items[sym_index];
@@ -4806,9 +4840,17 @@ fn writeSymbolTable(self: *MachO) !void {
}
}
+ var undefs = std.ArrayList(macho.nlist_64).init(self.base.allocator);
+ defer undefs.deinit();
+
+ for (self.undefs.items) |sym| {
+ if (sym.n_strx == 0) continue;
+ try undefs.append(sym);
+ }
+
const nlocals = locals.items.len;
const nexports = self.globals.items.len;
- const nundefs = self.undefs.items.len;
+ const nundefs = undefs.items.len;
const locals_off = symtab.symoff;
const locals_size = nlocals * @sizeOf(macho.nlist_64);
@@ -4823,7 +4865,7 @@ fn writeSymbolTable(self: *MachO) !void {
const undefs_off = exports_off + exports_size;
const undefs_size = nundefs * @sizeOf(macho.nlist_64);
log.debug("writing undefined symbols from 0x{x} to 0x{x}", .{ undefs_off, undefs_size + undefs_off });
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undefs.items), undefs_off);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(undefs.items), undefs_off);
symtab.nsyms = @intCast(u32, nlocals + nexports + nundefs);
seg.inner.filesize += locals_size + exports_size + undefs_size;
@@ -5168,3 +5210,274 @@ pub fn findFirst(comptime T: type, haystack: []T, start: usize, predicate: anyty
}
return i;
}
+
+fn snapshotState(self: *MachO) !void {
+ const emit = self.base.options.emit orelse {
+ log.debug("no emit directory found; skipping snapshot...", .{});
+ return;
+ };
+
+ const Snapshot = struct {
+ const Node = struct {
+ const Tag = enum {
+ section_start,
+ section_end,
+ atom_start,
+ atom_end,
+ relocation,
+
+ pub fn jsonStringify(
+ tag: Tag,
+ options: std.json.StringifyOptions,
+ out_stream: anytype,
+ ) !void {
+ _ = options;
+ switch (tag) {
+ .section_start => try out_stream.writeAll("\"section_start\""),
+ .section_end => try out_stream.writeAll("\"section_end\""),
+ .atom_start => try out_stream.writeAll("\"atom_start\""),
+ .atom_end => try out_stream.writeAll("\"atom_end\""),
+ .relocation => try out_stream.writeAll("\"relocation\""),
+ }
+ }
+ };
+ const Payload = struct {
+ name: []const u8 = "",
+ aliases: [][]const u8 = &[0][]const u8{},
+ is_global: bool = false,
+ target: u64 = 0,
+ };
+ address: u64,
+ tag: Tag,
+ payload: Payload,
+ };
+ timestamp: i128,
+ nodes: []Node,
+ };
+
+ var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
+ defer arena_allocator.deinit();
+ const arena = &arena_allocator.allocator;
+
+ const out_file = try emit.directory.handle.createFile("snapshots.json", .{
+ .truncate = self.cold_start,
+ .read = true,
+ });
+ defer out_file.close();
+
+ if (out_file.seekFromEnd(-1)) {
+ try out_file.writer().writeByte(',');
+ } else |err| switch (err) {
+ error.Unseekable => try out_file.writer().writeByte('['),
+ else => |e| return e,
+ }
+ var writer = out_file.writer();
+
+ var snapshot = Snapshot{
+ .timestamp = std.time.nanoTimestamp(),
+ .nodes = undefined,
+ };
+ var nodes = std.ArrayList(Snapshot.Node).init(arena);
+
+ for (self.section_ordinals.keys()) |key| {
+ const seg = self.load_commands.items[key.seg].Segment;
+ const sect = seg.sections.items[key.sect];
+ const sect_name = try std.fmt.allocPrint(arena, "{s},{s}", .{
+ commands.segmentName(sect),
+ commands.sectionName(sect),
+ });
+ try nodes.append(.{
+ .address = sect.addr,
+ .tag = .section_start,
+ .payload = .{ .name = sect_name },
+ });
+
+ var atom: *Atom = self.atoms.get(key) orelse {
+ try nodes.append(.{
+ .address = sect.addr + sect.size,
+ .tag = .section_end,
+ .payload = .{},
+ });
+ continue;
+ };
+
+ while (atom.prev) |prev| {
+ atom = prev;
+ }
+
+ while (true) {
+ const atom_sym = self.locals.items[atom.local_sym_index];
+ var node = Snapshot.Node{
+ .address = atom_sym.n_value,
+ .tag = .atom_start,
+ .payload = .{
+ .name = self.getString(atom_sym.n_strx),
+ .is_global = self.symbol_resolver.contains(atom_sym.n_strx),
+ },
+ };
+
+ var aliases = std.ArrayList([]const u8).init(arena);
+ for (atom.aliases.items) |loc| {
+ try aliases.append(self.getString(self.locals.items[loc].n_strx));
+ }
+ node.payload.aliases = aliases.toOwnedSlice();
+ try nodes.append(node);
+
+ var relocs = std.ArrayList(Snapshot.Node).init(arena);
+ try relocs.ensureTotalCapacity(atom.relocs.items.len);
+ for (atom.relocs.items) |rel| {
+ const arch = self.base.options.target.cpu.arch;
+ const source_addr = blk: {
+ const sym = self.locals.items[atom.local_sym_index];
+ break :blk sym.n_value + rel.offset;
+ };
+ const target_addr = blk: {
+ const is_via_got = got: {
+ switch (arch) {
+ .aarch64 => break :got switch (@intToEnum(macho.reloc_type_arm64, rel.@"type")) {
+ .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => true,
+ else => false,
+ },
+ .x86_64 => break :got switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
+ .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => true,
+ else => false,
+ },
+ else => unreachable,
+ }
+ };
+
+ if (is_via_got) {
+ const got_atom = self.got_entries_map.get(rel.target).?;
+ break :blk self.locals.items[got_atom.local_sym_index].n_value;
+ }
+
+ switch (rel.target) {
+ .local => |sym_index| {
+ const sym = self.locals.items[sym_index];
+ const is_tlv = is_tlv: {
+ const source_sym = self.locals.items[atom.local_sym_index];
+ const match = self.section_ordinals.keys()[source_sym.n_sect - 1];
+ const match_seg = self.load_commands.items[match.seg].Segment;
+ const match_sect = match_seg.sections.items[match.sect];
+ break :is_tlv commands.sectionType(match_sect) == macho.S_THREAD_LOCAL_VARIABLES;
+ };
+ if (is_tlv) {
+ const match_seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
+ const base_address = inner: {
+ if (self.tlv_data_section_index) |i| {
+ break :inner match_seg.sections.items[i].addr;
+ } else if (self.tlv_bss_section_index) |i| {
+ break :inner match_seg.sections.items[i].addr;
+ } else unreachable;
+ };
+ break :blk sym.n_value - base_address;
+ }
+ break :blk sym.n_value;
+ },
+ .global => |n_strx| {
+ const resolv = self.symbol_resolver.get(n_strx).?;
+ switch (resolv.where) {
+ .global => break :blk self.globals.items[resolv.where_index].n_value,
+ .undef => {
+ break :blk if (self.stubs_map.get(n_strx)) |stub_atom|
+ self.locals.items[stub_atom.local_sym_index].n_value
+ else
+ 0;
+ },
+ }
+ },
+ }
+ };
+
+ relocs.appendAssumeCapacity(.{
+ .address = source_addr,
+ .tag = .relocation,
+ .payload = .{ .target = target_addr },
+ });
+ }
+
+ if (atom.contained.items.len == 0) {
+ try nodes.appendSlice(relocs.items);
+ } else {
+            // Need to reverse the iteration order of the relocs since, for relocatable
+            // sources, they come in reverse by default. This doesn't matter for linking,
+            // but it does for arranging the memory layout for display.
+ std.mem.reverse(Snapshot.Node, relocs.items);
+
+ var next_i: usize = 0;
+ var last_rel: usize = 0;
+ while (next_i < atom.contained.items.len) : (next_i += 1) {
+ const loc = atom.contained.items[next_i];
+ const cont_sym = self.locals.items[loc.local_sym_index];
+ const cont_sym_name = self.getString(cont_sym.n_strx);
+ var contained_node = Snapshot.Node{
+ .address = cont_sym.n_value,
+ .tag = .atom_start,
+ .payload = .{
+ .name = cont_sym_name,
+ .is_global = self.symbol_resolver.contains(cont_sym.n_strx),
+ },
+ };
+
+ // Accumulate aliases
+ var inner_aliases = std.ArrayList([]const u8).init(arena);
+ while (true) {
+ if (next_i + 1 >= atom.contained.items.len) break;
+ const next_sym = self.locals.items[atom.contained.items[next_i + 1].local_sym_index];
+ if (next_sym.n_value != cont_sym.n_value) break;
+ const next_sym_name = self.getString(next_sym.n_strx);
+ if (self.symbol_resolver.contains(next_sym.n_strx)) {
+ try inner_aliases.append(contained_node.payload.name);
+ contained_node.payload.name = next_sym_name;
+ contained_node.payload.is_global = true;
+ } else try inner_aliases.append(next_sym_name);
+ next_i += 1;
+ }
+
+ const cont_size = if (next_i + 1 < atom.contained.items.len)
+ self.locals.items[atom.contained.items[next_i + 1].local_sym_index].n_value - cont_sym.n_value
+ else
+ atom_sym.n_value + atom.size - cont_sym.n_value;
+
+ contained_node.payload.aliases = inner_aliases.toOwnedSlice();
+ try nodes.append(contained_node);
+
+ for (relocs.items[last_rel..]) |rel, rel_i| {
+ if (rel.address >= cont_sym.n_value + cont_size) {
+                        last_rel += rel_i; // rel_i is relative to the slice start, not to relocs.items
+ break;
+ }
+ try nodes.append(rel);
+ }
+
+ try nodes.append(.{
+ .address = cont_sym.n_value + cont_size,
+ .tag = .atom_end,
+ .payload = .{},
+ });
+ }
+ }
+
+ try nodes.append(.{
+ .address = atom_sym.n_value + atom.size,
+ .tag = .atom_end,
+ .payload = .{},
+ });
+
+ if (atom.next) |next| {
+ atom = next;
+ } else break;
+ }
+
+ try nodes.append(.{
+ .address = sect.addr + sect.size,
+ .tag = .section_end,
+ .payload = .{},
+ });
+ }
+
+ snapshot.nodes = nodes.toOwnedSlice();
+
+ try std.json.stringify(snapshot, .{}, writer);
+ try writer.writeByte(']');
+}
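
Note: the snapshot file accumulates a single JSON array across linker runs. On a cold start the file is truncated; on later runs the trailing ']' is overwritten with a ',' before the next snapshot object is appended, relying on seekFromEnd(-1) failing with error.Unseekable on an empty file. A minimal standalone sketch of that append-to-JSON-array pattern (the file name and payload are illustrative; the real code keys truncation off self.cold_start):

    const std = @import("std");

    pub fn main() !void {
        const file = try std.fs.cwd().createFile("snapshots.json", .{
            .truncate = false, // the linker truncates only on a cold start
            .read = true,
        });
        defer file.close();

        // Overwrite the trailing ']' with ',' when the file already holds an
        // array; an empty file cannot seek to -1, so start a fresh array.
        if (file.seekFromEnd(-1)) {
            try file.writer().writeByte(',');
        } else |err| switch (err) {
            error.Unseekable => try file.writer().writeByte('['),
            else => |e| return e,
        }

        try file.writer().writeAll("{\"timestamp\":0,\"nodes\":[]}");
        try file.writer().writeByte(']');
    }
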
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index c32d1f1d8f..07a20ac336 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -345,15 +345,9 @@ pub fn parseRelocs(self: *Atom, relocs: []macho.relocation_info, context: RelocC
const seg = context.object.load_commands.items[context.object.segment_cmd_index.?].Segment;
const sect = seg.sections.items[sect_id];
const match = (try context.macho_file.getMatchingSection(sect)) orelse unreachable;
- const sym_name = try std.fmt.allocPrint(context.allocator, "{s}_{s}_{s}", .{
- context.object.name,
- commands.segmentName(sect),
- commands.sectionName(sect),
- });
- defer context.allocator.free(sym_name);
const local_sym_index = @intCast(u32, context.macho_file.locals.items.len);
try context.macho_file.locals.append(context.allocator, .{
- .n_strx = try context.macho_file.makeString(sym_name),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, context.macho_file.section_ordinals.getIndex(match).? + 1),
.n_desc = 0,
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index de747eb4c7..f0a299182c 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -174,7 +174,13 @@ pub fn free(self: *Object, allocator: *Allocator, macho_file: *MachO) void {
if (atom.local_sym_index != 0) {
macho_file.locals_free_list.append(allocator, atom.local_sym_index) catch {};
const local = &macho_file.locals.items[atom.local_sym_index];
- local.n_type = 0;
+ local.* = .{
+ .n_strx = 0,
+ .n_type = 0,
+ .n_sect = 0,
+ .n_desc = 0,
+ .n_value = 0,
+ };
atom.local_sym_index = 0;
}
if (atom == last_atom) {
@@ -458,15 +464,9 @@ pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO)
// a temp one, unless we already did that when working out the relocations
// of other atoms.
const atom_local_sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
- const sym_name = try std.fmt.allocPrint(allocator, "{s}_{s}_{s}", .{
- self.name,
- segmentName(sect),
- sectionName(sect),
- });
- defer allocator.free(sym_name);
const atom_local_sym_index = @intCast(u32, macho_file.locals.items.len);
try macho_file.locals.append(allocator, .{
- .n_strx = try macho_file.makeString(sym_name),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1),
.n_desc = 0,
diff --git a/src/main.zig b/src/main.zig
index d978df565d..a2dd8d1d96 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -27,7 +27,7 @@ const crash_report = @import("crash_report.zig");
pub usingnamespace crash_report.root_decls;
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
- std.log.emerg(format, args);
+ std.log.err(format, args);
process.exit(1);
}
@@ -94,7 +94,7 @@ const usage = if (debug_extensions_enabled) debug_usage else normal_usage;
pub const log_level: std.log.Level = switch (builtin.mode) {
.Debug => .debug,
.ReleaseSafe, .ReleaseFast => .info,
- .ReleaseSmall => .crit,
+ .ReleaseSmall => .err,
};
var log_scopes: std.ArrayListUnmanaged([]const u8) = .{};
@@ -120,14 +120,7 @@ pub fn log(
} else return;
}
- // We only recognize 4 log levels in this application.
- const level_txt = switch (level) {
- .emerg, .alert, .crit, .err => "error",
- .warn => "warning",
- .notice, .info => "info",
- .debug => "debug",
- };
- const prefix1 = level_txt;
+ const prefix1 = comptime level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
// Print the message to stderr, silently ignoring any errors
@@ -436,6 +429,7 @@ const usage_build_generic =
\\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features
\\ --debug-log [scope] Enable printing debug/info log messages for scope
\\ --debug-compile-errors Crash with helpful diagnostics at the first compile error
+ \\ --debug-link-snapshot Enable dumping of the linker's state in JSON format
\\
;
@@ -635,6 +629,7 @@ fn buildOutputType(
var major_subsystem_version: ?u32 = null;
var minor_subsystem_version: ?u32 = null;
var wasi_exec_model: ?std.builtin.WasiExecModel = null;
+ var enable_link_snapshots: bool = false;
var system_libs = std.ArrayList([]const u8).init(gpa);
defer system_libs.deinit();
@@ -932,6 +927,12 @@ fn buildOutputType(
} else {
try log_scopes.append(gpa, args[i]);
}
+ } else if (mem.eql(u8, arg, "--debug-link-snapshot")) {
+ if (!build_options.enable_link_snapshots) {
+ std.log.warn("Zig was compiled without linker snapshots enabled (-Dlink-snapshot). --debug-link-snapshot has no effect.", .{});
+ } else {
+ enable_link_snapshots = true;
+ }
} else if (mem.eql(u8, arg, "-fcompiler-rt")) {
want_compiler_rt = true;
} else if (mem.eql(u8, arg, "-fno-compiler-rt")) {
@@ -2149,6 +2150,7 @@ fn buildOutputType(
.subsystem = subsystem,
.wasi_exec_model = wasi_exec_model,
.debug_compile_errors = debug_compile_errors,
+ .enable_link_snapshots = enable_link_snapshots,
}) catch |err| {
fatal("unable to create compilation: {s}", .{@errorName(err)});
};
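
Note: with emerg/alert/crit/notice gone from std.log, the hand-rolled level-name table collapses into level.asText(). A minimal sketch of a root log override in the new four-level style, assuming only the std.log API shown in the diff (the std.debug.print sink is illustrative):

    const std = @import("std");

    pub const log_level: std.log.Level = .info;

    pub fn log(
        comptime level: std.log.Level,
        comptime scope: @TypeOf(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) void {
        const prefix1 = comptime level.asText();
        const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
        // Print to stderr, silently ignoring any errors.
        std.debug.print(prefix1 ++ prefix2 ++ format ++ "\n", args);
    }
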
diff --git a/src/print_air.zig b/src/print_air.zig
index 861483abac..17efa8297d 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -111,7 +111,10 @@ const Writer = struct {
.mul,
.mulwrap,
.mul_sat,
- .div,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ .div_exact,
.rem,
.mod,
.ptr_add,
@@ -130,9 +133,7 @@ const Writer = struct {
.store,
.array_elem_val,
.slice_elem_val,
- .ptr_slice_elem_val,
.ptr_elem_val,
- .ptr_ptr_elem_val,
.shl,
.shl_exact,
.shl_sat,
@@ -183,6 +184,8 @@ const Writer = struct {
.wrap_errunion_err,
.slice_ptr,
.slice_len,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -199,7 +202,11 @@ const Writer = struct {
.loop,
=> try w.writeBlock(s, inst),
- .ptr_elem_ptr => try w.writePtrElemPtr(s, inst),
+ .slice,
+ .slice_elem_ptr,
+ .ptr_elem_ptr,
+ => try w.writeTyPlBin(s, inst),
+
.struct_field_ptr => try w.writeStructField(s, inst),
.struct_field_val => try w.writeStructField(s, inst),
.constant => try w.writeConstant(s, inst),
@@ -280,7 +287,7 @@ const Writer = struct {
try s.print(", {d}", .{extra.field_index});
}
- fn writePtrElemPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ fn writeTyPlBin(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -368,9 +375,52 @@ const Writer = struct {
}
fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
- _ = w;
- _ = inst;
- try s.writeAll("TODO");
+ const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
+ const air_asm = w.air.extraData(Air.Asm, ty_pl.payload);
+ const zir = w.zir;
+ const extended = zir.instructions.items(.data)[air_asm.data.zir_index].extended;
+ const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
+ const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
+ const outputs_len = @truncate(u5, extended.small);
+ const args_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ const args = @bitCast([]const Air.Inst.Ref, w.air.extra[air_asm.end..][0..args_len]);
+
+ var extra_i: usize = zir_extra.end;
+ const output_constraint: ?[]const u8 = out: {
+ var i: usize = 0;
+ while (i < outputs_len) : (i += 1) {
+ const output = zir.extraData(Zir.Inst.Asm.Output, extra_i);
+ extra_i = output.end;
+ break :out zir.nullTerminatedString(output.data.constraint);
+ }
+ break :out null;
+ };
+
+ try s.print("\"{s}\"", .{asm_source});
+
+ if (output_constraint) |constraint| {
+ const ret_ty = w.air.typeOfIndex(inst);
+ try s.print(", {s} -> {}", .{ constraint, ret_ty });
+ }
+
+ for (args) |arg| {
+ const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
+ extra_i = input.end;
+ const constraint = zir.nullTerminatedString(input.data.constraint);
+
+ try s.print(", {s} = (", .{constraint});
+ try w.writeOperand(s, inst, 0, arg);
+ try s.writeByte(')');
+ }
+
+ const clobbers = zir.extra[extra_i..][0..clobbers_len];
+ for (clobbers) |clobber_index| {
+ const clobber = zir.nullTerminatedString(clobber_index);
+ try s.writeAll(", ~{");
+ try s.writeAll(clobber);
+ try s.writeAll("}");
+ }
}
fn writeDbgStmt(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
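
Note: writeAssembly recovers the three operand counts from the 16-bit small field of the extended ZIR instruction, which packs three 5-bit lengths. A round-trip sketch of that packing, assuming the layout implied above (outputs in bits 0-4, inputs in bits 5-9, clobbers in bits 10-14):

    const std = @import("std");

    fn packSmall(outputs_len: u5, args_len: u5, clobbers_len: u5) u16 {
        return @as(u16, outputs_len) |
            (@as(u16, args_len) << 5) |
            (@as(u16, clobbers_len) << 10);
    }

    test "round-trip the asm operand counts" {
        const small = packSmall(1, 3, 2);
        try std.testing.expectEqual(@as(u5, 1), @truncate(u5, small));
        try std.testing.expectEqual(@as(u5, 3), @truncate(u5, small >> 5));
        try std.testing.expectEqual(@as(u5, 2), @truncate(u5, small >> 10));
    }
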
diff --git a/src/print_zir.zig b/src/print_zir.zig
index f0f282f55d..a3988986f0 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -184,7 +184,6 @@ const Writer = struct {
.is_non_err,
.is_non_err_ptr,
.typeof,
- .typeof_elem,
.struct_init_empty,
.type_info,
.size_of,
@@ -234,6 +233,8 @@ const Writer = struct {
.@"await",
.await_nosuspend,
.fence,
+ .switch_cond,
+ .switch_cond_ref,
=> try self.writeUnNode(stream, inst),
.ref,
@@ -347,7 +348,6 @@ const Writer = struct {
.reduce,
.atomic_load,
.bitcast,
- .bitcast_result_ptr,
.vector_type,
.maximum,
.minimum,
@@ -379,19 +379,7 @@ const Writer = struct {
.error_set_decl_anon => try self.writeErrorSetDecl(stream, inst, .anon),
.error_set_decl_func => try self.writeErrorSetDecl(stream, inst, .func),
- .switch_block => try self.writePlNodeSwitchBr(stream, inst, .none),
- .switch_block_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
- .switch_block_under => try self.writePlNodeSwitchBr(stream, inst, .under),
- .switch_block_ref => try self.writePlNodeSwitchBr(stream, inst, .none),
- .switch_block_ref_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
- .switch_block_ref_under => try self.writePlNodeSwitchBr(stream, inst, .under),
-
- .switch_block_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
- .switch_block_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
- .switch_block_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
- .switch_block_ref_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
- .switch_block_ref_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
- .switch_block_ref_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
+ .switch_block => try self.writePlNodeSwitchBlock(stream, inst),
.field_ptr,
.field_val,
@@ -1649,113 +1637,46 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
- fn writePlNodeSwitchBr(
- self: *Writer,
- stream: anytype,
- inst: Zir.Inst.Index,
- special_prong: Zir.SpecialProng,
- ) !void {
+ fn writePlNodeSwitchBlock(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
- const special: struct {
- body: []const Zir.Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = self.code.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = self.code.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
-
- try self.writeInstRef(stream, extra.data.operand);
-
- self.indent += 2;
-
- if (special.body.len != 0) {
- const prong_name = switch (special_prong) {
- .@"else" => "else",
- .under => "_",
- else => unreachable,
- };
- try stream.writeAll(",\n");
- try stream.writeByteNTimes(' ', self.indent);
- try stream.print("{s} => ", .{prong_name});
- try self.writeBracedBody(stream, special.body);
- }
-
- var extra_index: usize = special.end;
- {
- var scalar_i: usize = 0;
- while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body_len;
-
- try stream.writeAll(",\n");
- try stream.writeByteNTimes(' ', self.indent);
- try self.writeInstRef(stream, item_ref);
- try stream.writeAll(" => ");
- try self.writeBracedBody(stream, body);
- }
- }
-
- self.indent -= 2;
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
+ var extra_index: usize = extra.end;
- fn writePlNodeSwitchBlockMulti(
- self: *Writer,
- stream: anytype,
- inst: Zir.Inst.Index,
- special_prong: Zir.SpecialProng,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index);
- const special: struct {
- body: []const Zir.Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = self.code.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = self.code.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
try self.writeInstRef(stream, extra.data.operand);
+ try self.writeFlag(stream, ", ref", extra.data.bits.is_ref);
self.indent += 2;
- if (special.body.len != 0) {
+ else_prong: {
+ const special_prong = extra.data.bits.specialProng();
const prong_name = switch (special_prong) {
.@"else" => "else",
.under => "_",
- else => unreachable,
+ else => break :else_prong,
};
+
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
try stream.writeAll(",\n");
try stream.writeByteNTimes(' ', self.indent);
try stream.print("{s} => ", .{prong_name});
- try self.writeBracedBody(stream, special.body);
+ try self.writeBracedBody(stream, body);
}
- var extra_index: usize = special.end;
{
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
var scalar_i: usize = 0;
- while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
extra_index += 1;
const body_len = self.code.extra[extra_index];
@@ -1772,7 +1693,7 @@ const Writer = struct {
}
{
var multi_i: usize = 0;
- while (multi_i < extra.data.multi_cases_len) : (multi_i += 1) {
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = self.code.extra[extra_index];
extra_index += 1;
const ranges_len = self.code.extra[extra_index];
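
Note: the twelve switch_block variants collapse into a single instruction whose trailing data is self-describing: an optional multi-case count, then the special (else/_) prong body, then the scalar prongs. A sketch of that decode order with a hypothetical stand-in for Zir.Inst.SwitchBlock.Bits (the real packed layout lives in src/Zir.zig):

    const std = @import("std");

    const Bits = struct {
        is_ref: bool = false,
        has_multi_cases: bool = false,
        has_else: bool = false,
        has_under: bool = false,
        scalar_cases_len: u32 = 0,
    };

    // Walks the trailing data in the same order as writePlNodeSwitchBlock.
    fn walkSwitch(extra: []const u32, bits: Bits) usize {
        var i: usize = 0;
        if (bits.has_multi_cases) i += 1; // multi-case count comes first
        if (bits.has_else or bits.has_under) {
            const body_len = extra[i];
            i += 1 + body_len; // special prong body
        }
        var scalar_i: u32 = 0;
        while (scalar_i < bits.scalar_cases_len) : (scalar_i += 1) {
            i += 1; // item ref
            const body_len = extra[i];
            i += 1 + body_len; // scalar prong body
        }
        return i; // multi-case prongs would follow
    }

    test "decode order for an else prong plus one scalar case" {
        const extra = [_]u32{ 0, 42, 1, 7 }; // else body_len=0; item, body_len=1, one inst
        try std.testing.expectEqual(@as(usize, 4), walkSwitch(&extra, .{
            .has_else = true,
            .scalar_cases_len = 1,
        }));
    }
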
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 10dbd3b359..047fb54b85 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -2960,33 +2960,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_runtime_safety, bool want_fast
}
return result;
case DivKindTrunc:
- {
- LLVMBasicBlockRef ltz_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivTruncLTZero");
- LLVMBasicBlockRef gez_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivTruncGEZero");
- LLVMBasicBlockRef end_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivTruncEnd");
- LLVMValueRef ltz = LLVMBuildFCmp(g->builder, LLVMRealOLT, val1, zero, "");
- if (operand_type->id == ZigTypeIdVector) {
- ltz = ZigLLVMBuildOrReduce(g->builder, ltz);
- }
- LLVMBuildCondBr(g->builder, ltz, ltz_block, gez_block);
-
- LLVMPositionBuilderAtEnd(g->builder, ltz_block);
- LLVMValueRef ceiled = gen_float_op(g, result, operand_type, BuiltinFnIdCeil);
- LLVMBasicBlockRef ceiled_end_block = LLVMGetInsertBlock(g->builder);
- LLVMBuildBr(g->builder, end_block);
-
- LLVMPositionBuilderAtEnd(g->builder, gez_block);
- LLVMValueRef floored = gen_float_op(g, result, operand_type, BuiltinFnIdFloor);
- LLVMBasicBlockRef floored_end_block = LLVMGetInsertBlock(g->builder);
- LLVMBuildBr(g->builder, end_block);
-
- LLVMPositionBuilderAtEnd(g->builder, end_block);
- LLVMValueRef phi = LLVMBuildPhi(g->builder, get_llvm_type(g, operand_type), "");
- LLVMValueRef incoming_values[] = { ceiled, floored };
- LLVMBasicBlockRef incoming_blocks[] = { ceiled_end_block, floored_end_block };
- LLVMAddIncoming(phi, incoming_values, incoming_blocks, 2);
- return phi;
- }
+ return gen_float_op(g, result, operand_type, BuiltinFnIdTrunc);
case DivKindFloor:
return gen_float_op(g, result, operand_type, BuiltinFnIdFloor);
}
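
Note: the deleted basic blocks were computing trunc by hand. Ceil for a negative quotient and floor otherwise is exactly round-toward-zero, so a single trunc suffices. A small Zig check of that identity (illustrative only; the change itself is in the stage1 C++ backend):

    const std = @import("std");

    test "trunc is ceil for negative quotients and floor otherwise" {
        const cases = [_][2]f64{ .{ 7, 2 }, .{ -7, 2 }, .{ 7, -2 }, .{ -7, -2 } };
        for (cases) |c| {
            const q = c[0] / c[1];
            const expected = if (q < 0) @ceil(q) else @floor(q);
            try std.testing.expectEqual(expected, @trunc(q));
        }
    }
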
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 7247ed50a9..84e809730e 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -575,12 +575,14 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const fn_decl_loc = fn_decl.getLocation();
const has_body = fn_decl.hasBody();
const storage_class = fn_decl.getStorageClass();
+ const is_always_inline = has_body and fn_decl.hasAlwaysInlineAttr();
var decl_ctx = FnDeclContext{
.fn_name = fn_name,
.has_body = has_body,
.storage_class = storage_class,
+ .is_always_inline = is_always_inline,
.is_export = switch (storage_class) {
- .None => has_body and !fn_decl.isInlineSpecified(),
+ .None => has_body and !is_always_inline and !fn_decl.isInlineSpecified(),
.Extern, .Static => false,
.PrivateExtern => return failDecl(c, fn_decl_loc, fn_name, "unsupported storage class: private extern", .{}),
.Auto => unreachable, // Not legal on functions
@@ -615,6 +617,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
decl_ctx.has_body = false;
decl_ctx.storage_class = .Extern;
decl_ctx.is_export = false;
+ decl_ctx.is_always_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "TODO unable to translate variadic function, demoted to extern", .{});
}
break :blk transFnProto(c, fn_decl, fn_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) {
@@ -653,6 +656,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const param_name = param.name orelse {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
+ proto_node.data.is_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "function {s} parameter has no name, demoted to extern", .{fn_name});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
};
@@ -685,6 +689,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
=> {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
+ proto_node.data.is_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "unable to translate function, demoted to extern", .{});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
},
@@ -704,6 +709,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
=> {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
+ proto_node.data.is_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "unable to create a return value for function, demoted to extern", .{});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
},
@@ -974,6 +980,7 @@ fn buildFlexibleArrayFn(
.is_pub = true,
.is_extern = false,
.is_export = false,
+ .is_inline = false,
.is_var_args = false,
.name = field_name,
.linksection_string = null,
@@ -2808,16 +2815,18 @@ fn maybeBlockify(c: *Context, scope: *Scope, stmt: *const clang.Stmt) TransError
.NullStmtClass,
.WhileStmtClass,
=> return transStmt(c, scope, stmt, .unused),
- else => {
- var block_scope = try Scope.Block.init(c, scope, false);
- defer block_scope.deinit();
- const result = try transStmt(c, &block_scope.base, stmt, .unused);
- try block_scope.statements.append(result);
- return block_scope.complete(c);
- },
+ else => return blockify(c, scope, stmt),
}
}
+fn blockify(c: *Context, scope: *Scope, stmt: *const clang.Stmt) TransError!Node {
+ var block_scope = try Scope.Block.init(c, scope, false);
+ defer block_scope.deinit();
+ const result = try transStmt(c, &block_scope.base, stmt, .unused);
+ try block_scope.statements.append(result);
+ return block_scope.complete(c);
+}
+
fn transIfStmt(
c: *Context,
scope: *Scope,
@@ -2835,9 +2844,21 @@ fn transIfStmt(
const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
- const then_body = try maybeBlockify(c, scope, stmt.getThen());
+ const then_stmt = stmt.getThen();
+ const else_stmt = stmt.getElse();
+ const then_class = then_stmt.getStmtClass();
+    // A block is needed to keep the else statement from attaching to the inner while loop.
+ const must_blockify = (else_stmt != null) and switch (then_class) {
+ .DoStmtClass, .ForStmtClass, .WhileStmtClass => true,
+ else => false,
+ };
+
+ const then_body = if (must_blockify)
+ try blockify(c, scope, then_stmt)
+ else
+ try maybeBlockify(c, scope, then_stmt);
- const else_body = if (stmt.getElse()) |expr|
+ const else_body = if (else_stmt) |expr|
try maybeBlockify(c, scope, expr)
else
null;
@@ -4807,6 +4828,7 @@ const FnDeclContext = struct {
fn_name: []const u8,
has_body: bool,
storage_class: clang.StorageClass,
+ is_always_inline: bool,
is_export: bool,
};
@@ -4857,7 +4879,7 @@ fn transFnNoProto(
is_pub: bool,
) !*ast.Payload.Func {
const cc = try transCC(c, fn_ty, source_loc);
- const is_var_args = if (fn_decl_context) |ctx| (!ctx.is_export and ctx.storage_class != .Static) else true;
+ const is_var_args = if (fn_decl_context) |ctx| (!ctx.is_export and ctx.storage_class != .Static and !ctx.is_always_inline) else true;
return finishTransFnProto(c, null, null, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub);
}
@@ -4874,9 +4896,9 @@ fn finishTransFnProto(
) !*ast.Payload.Func {
const is_export = if (fn_decl_context) |ctx| ctx.is_export else false;
const is_extern = if (fn_decl_context) |ctx| !ctx.has_body else false;
+ const is_inline = if (fn_decl_context) |ctx| ctx.is_always_inline else false;
const scope = &c.global_scope.base;
- // TODO check for always_inline attribute
// TODO check for align attribute
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
@@ -4920,7 +4942,7 @@ fn finishTransFnProto(
const alignment = if (fn_decl) |decl| zigAlignment(decl.getAlignedAttribute(c.clang_context)) else null;
- const explicit_callconv = if ((is_export or is_extern) and cc == .C) null else cc;
+ const explicit_callconv = if ((is_inline or is_export or is_extern) and cc == .C) null else cc;
const return_type_node = blk: {
if (fn_ty.getNoReturnAttr()) {
@@ -4949,6 +4971,7 @@ fn finishTransFnProto(
.is_pub = is_pub,
.is_extern = is_extern,
.is_export = is_export,
+ .is_inline = is_inline,
.is_var_args = is_var_args,
.name = name,
.linksection_string = linksection_string,
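
Note: with is_always_inline threaded through, a C function carrying __attribute__((always_inline)) now translates to a Zig inline fn instead of an exported symbol, and every demotion-to-extern path also clears is_inline. A sketch of the expected shape of the output (illustrative, not actual translate-c output):

    // C input:
    //   static inline __attribute__((always_inline)) int add(int a, int b) {
    //       return a + b;
    //   }
    // Expected shape of the translated Zig:
    pub inline fn add(a: c_int, b: c_int) c_int {
        return a + b;
    }
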
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index a86ec0d414..315f22d7f2 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -540,6 +540,7 @@ pub const Payload = struct {
is_pub: bool,
is_extern: bool,
is_export: bool,
+ is_inline: bool,
is_var_args: bool,
name: ?[]const u8,
linksection_string: ?[]const u8,
@@ -2614,6 +2615,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
if (payload.is_pub) _ = try c.addToken(.keyword_pub, "pub");
if (payload.is_extern) _ = try c.addToken(.keyword_extern, "extern");
if (payload.is_export) _ = try c.addToken(.keyword_export, "export");
+ if (payload.is_inline) _ = try c.addToken(.keyword_inline, "inline");
const fn_token = try c.addToken(.keyword_fn, "fn");
if (payload.name) |some| _ = try c.addIdentifier(some);
diff --git a/src/type.zig b/src/type.zig
index d845d8a49a..4682b3ed10 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1529,6 +1529,7 @@ pub const Type = extern union {
return fast_result;
}
+ /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
pub fn ptrAlignment(self: Type, target: Target) u32 {
switch (self.tag()) {
.single_const_pointer,
@@ -1693,15 +1694,15 @@ pub const Type = extern union {
},
.error_union => {
- const payload = self.castTag(.error_union).?.data;
- if (!payload.error_set.hasCodeGenBits()) {
- return payload.payload.abiAlignment(target);
- } else if (!payload.payload.hasCodeGenBits()) {
- return payload.error_set.abiAlignment(target);
+ const data = self.castTag(.error_union).?.data;
+ if (!data.error_set.hasCodeGenBits()) {
+ return data.payload.abiAlignment(target);
+ } else if (!data.payload.hasCodeGenBits()) {
+ return data.error_set.abiAlignment(target);
}
- return std.math.max(
- payload.payload.abiAlignment(target),
- payload.error_set.abiAlignment(target),
+ return @maximum(
+ data.payload.abiAlignment(target),
+ data.error_set.abiAlignment(target),
);
},
@@ -1739,10 +1740,10 @@ pub const Type = extern union {
.empty_struct,
.void,
+ .c_void,
=> return 0,
.empty_struct_literal,
- .c_void,
.type,
.comptime_int,
.comptime_float,
@@ -1763,6 +1764,7 @@ pub const Type = extern union {
}
/// Asserts the type has the ABI size already resolved.
+ /// Types that return false for hasCodeGenBits() return 0.
pub fn abiSize(self: Type, target: Target) u64 {
return switch (self.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
@@ -1770,53 +1772,32 @@ pub const Type = extern union {
.fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
.fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
.function => unreachable, // represents machine code; not a pointer
- .c_void => unreachable,
- .type => unreachable,
- .comptime_int => unreachable,
- .comptime_float => unreachable,
+ .@"opaque" => unreachable, // no size available
+ .bound_fn => unreachable, // TODO remove from the language
.noreturn => unreachable,
- .@"null" => unreachable,
- .@"undefined" => unreachable,
- .enum_literal => unreachable,
- .single_const_pointer_to_comptime_int => unreachable,
- .empty_struct_literal => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .@"opaque" => unreachable,
.var_args_param => unreachable,
.generic_poison => unreachable,
- .type_info => unreachable,
- .bound_fn => unreachable,
+ .call_options => unreachable, // missing call to resolveTypeFields
+ .export_options => unreachable, // missing call to resolveTypeFields
+ .extern_options => unreachable, // missing call to resolveTypeFields
+ .type_info => unreachable, // missing call to resolveTypeFields
- .empty_struct, .void => 0,
-
- .@"struct" => {
- const fields = self.structFields();
- if (self.castTag(.@"struct")) |payload| {
- const struct_obj = payload.data;
- assert(struct_obj.status == .have_layout);
- const is_packed = struct_obj.layout == .Packed;
- if (is_packed) @panic("TODO packed structs");
- }
- var size: u64 = 0;
- var big_align: u32 = 0;
- for (fields.values()) |field| {
- if (!field.ty.hasCodeGenBits()) continue;
+ .c_void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .@"null",
+ .@"undefined",
+ .enum_literal,
+ .single_const_pointer_to_comptime_int,
+ .empty_struct_literal,
+ .empty_struct,
+ .void,
+ => 0,
- const field_align = a: {
- if (field.abi_align.tag() == .abi_align_default) {
- break :a field.ty.abiAlignment(target);
- } else {
- break :a @intCast(u32, field.abi_align.toUnsignedInt());
- }
- };
- big_align = @maximum(big_align, field_align);
- size = std.mem.alignForwardGeneric(u64, size, field_align);
- size += field.ty.abiSize(target);
- }
- size = std.mem.alignForwardGeneric(u64, size, big_align);
- return size;
- },
+ .@"struct" => return self.structFieldOffset(self.structFieldCount(), target),
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
@@ -1836,9 +1817,6 @@ pub const Type = extern union {
.address_space,
.float_mode,
.reduce_op,
- .call_options,
- .export_options,
- .extern_options,
=> return 1,
.array_u8 => self.castTag(.array_u8).?.data,
@@ -1942,15 +1920,25 @@ pub const Type = extern union {
},
.error_union => {
- const payload = self.castTag(.error_union).?.data;
- if (!payload.error_set.hasCodeGenBits() and !payload.payload.hasCodeGenBits()) {
+ const data = self.castTag(.error_union).?.data;
+ if (!data.error_set.hasCodeGenBits() and !data.payload.hasCodeGenBits()) {
return 0;
- } else if (!payload.error_set.hasCodeGenBits()) {
- return payload.payload.abiSize(target);
- } else if (!payload.payload.hasCodeGenBits()) {
- return payload.error_set.abiSize(target);
+ } else if (!data.error_set.hasCodeGenBits()) {
+ return data.payload.abiSize(target);
+ } else if (!data.payload.hasCodeGenBits()) {
+ return data.error_set.abiSize(target);
}
- std.debug.panic("TODO abiSize error union {}", .{self});
+ const code_align = abiAlignment(data.error_set, target);
+ const payload_align = abiAlignment(data.payload, target);
+ const big_align = @maximum(code_align, payload_align);
+ const payload_size = abiSize(data.payload, target);
+
+ var size: u64 = 0;
+ size += abiSize(data.error_set, target);
+ size = std.mem.alignForwardGeneric(u64, size, payload_align);
+ size += payload_size;
+ size = std.mem.alignForwardGeneric(u64, size, big_align);
+ return size;
},
};
}
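
Note: abiSize for error unions now follows the standard two-field layout: the error code comes first, the payload is aligned up after it, and the total is rounded to the larger alignment. A worked sketch of the same formula, hard-coding a 2-byte error code with an 8-byte, 8-aligned payload (illustrative numbers, not a call into the compiler):

    const std = @import("std");

    test "error union size: code, align up, payload, align up" {
        const code_size: u64 = 2;
        const payload_size: u64 = 8;
        const payload_align: u64 = 8;
        const big_align: u64 = 8; // max(code_align, payload_align)
        var size: u64 = code_size;
        size = std.mem.alignForwardGeneric(u64, size, payload_align);
        size += payload_size;
        size = std.mem.alignForwardGeneric(u64, size, big_align);
        try std.testing.expectEqual(@as(u64, 16), size);
    }
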
@@ -2509,6 +2497,20 @@ pub const Type = extern union {
};
}
+ /// Returns the type of a pointer to an element.
+ /// Asserts that the type is a pointer, and that the element type is indexable.
+    /// For *[N]T, returns *T
+ /// For [*]T, returns *T
+ /// For []T, returns *T
+ /// Handles const-ness and address spaces in particular.
+ pub fn elemPtrType(ptr_ty: Type, arena: *Allocator) !Type {
+ return try Type.ptr(arena, .{
+ .pointee_type = ptr_ty.elemType2(),
+ .mutable = ptr_ty.ptrIsMutable(),
+ .@"addrspace" = ptr_ty.ptrAddressSpace(),
+ });
+ }
+
fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
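
Note: elemPtrType derives the single-item pointer type while carrying over mutability and address space. The same relationship at the language level (a quick illustrative test, not compiler internals):

    const std = @import("std");

    test "pointer-to-element types" {
        var arr = [_]u8{ 1, 2, 3, 4 };
        const array_ptr: *[4]u8 = &arr;
        const slice: []u8 = &arr;
        // For *[N]T and []T alike, the element pointer is *T.
        try std.testing.expect(@TypeOf(&array_ptr[0]) == *u8);
        try std.testing.expect(@TypeOf(&slice[0]) == *u8);
    }
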
@@ -2573,12 +2575,14 @@ pub const Type = extern union {
pub fn unionFields(ty: Type) Module.Union.Fields {
const union_obj = ty.cast(Payload.Union).?.data;
+ assert(union_obj.haveFieldTypes());
return union_obj.fields;
}
pub fn unionFieldType(ty: Type, enum_tag: Value) Type {
const union_obj = ty.cast(Payload.Union).?.data;
const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag).?;
+ assert(union_obj.haveFieldTypes());
return union_obj.fields.values()[index].ty;
}
@@ -2617,6 +2621,17 @@ pub const Type = extern union {
};
}
+ /// Returns true if it is an error set that includes anyerror, false otherwise.
+ /// Note that the result may be a false negative if the type did not get error set
+ /// resolution prior to this call.
+ pub fn isAnyError(ty: Type) bool {
+ return switch (ty.tag()) {
+ .anyerror => true,
+ .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror,
+ else => false,
+ };
+ }
+
/// Asserts the type is an array or vector.
pub fn arrayLen(ty: Type) u64 {
return switch (ty.tag()) {
@@ -2653,7 +2668,7 @@ pub const Type = extern union {
.pointer => return self.castTag(.pointer).?.data.sentinel,
.array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel,
- .array_u8_sentinel_0 => return Value.initTag(.zero),
+ .array_u8_sentinel_0 => return Value.zero,
else => unreachable,
};
@@ -3073,6 +3088,14 @@ pub const Type = extern union {
}
return Value.initTag(.empty_struct_value);
},
+ .enum_numbered => {
+ const enum_numbered = ty.castTag(.enum_numbered).?.data;
+ if (enum_numbered.fields.count() == 1) {
+ return enum_numbered.values.keys()[0];
+ } else {
+ return null;
+ }
+ },
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
if (enum_full.fields.count() == 1) {
@@ -3084,13 +3107,19 @@ pub const Type = extern union {
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
if (enum_simple.fields.count() == 1) {
- return Value.initTag(.zero);
+ return Value.zero;
+ } else {
+ return null;
+ }
+ },
+ .enum_nonexhaustive => {
+ const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
+ if (!tag_ty.hasCodeGenBits()) {
+ return Value.zero;
} else {
return null;
}
},
- .enum_nonexhaustive => ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty,
- .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
.@"union" => {
return null; // TODO
},
@@ -3106,7 +3135,7 @@ pub const Type = extern union {
.int_unsigned, .int_signed => {
if (ty.cast(Payload.Bits).?.data == 0) {
- return Value.initTag(.zero);
+ return Value.zero;
} else {
return null;
}
@@ -3114,8 +3143,9 @@ pub const Type = extern union {
.vector, .array, .array_u8 => {
if (ty.arrayLen() == 0)
return Value.initTag(.empty_array);
- ty = ty.elemType();
- continue;
+ if (ty.elemType().onePossibleValue() != null)
+ return Value.initTag(.the_only_possible_value);
+ return null;
},
.inferred_alloc_const => unreachable,
@@ -3156,7 +3186,7 @@ pub const Type = extern union {
const info = self.intInfo(target);
if (info.signedness == .unsigned) {
- return Value.initTag(.zero);
+ return Value.zero;
}
if (info.bits <= 6) {
@@ -3229,14 +3259,11 @@ pub const Type = extern union {
};
}
- pub fn enumFieldCount(ty: Type) usize {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.fields.count();
- },
- .enum_simple => return ty.castTag(.enum_simple).?.data.fields.count(),
- .enum_numbered => return ty.castTag(.enum_numbered).?.data.fields.count(),
+ pub fn enumFields(ty: Type) Module.EnumFull.NameMap {
+ return switch (ty.tag()) {
+ .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields,
+ .enum_simple => ty.castTag(.enum_simple).?.data.fields,
+ .enum_numbered => ty.castTag(.enum_numbered).?.data.fields,
.atomic_order,
.atomic_rmw_op,
.calling_convention,
@@ -3247,65 +3274,20 @@ pub const Type = extern union {
.export_options,
.extern_options,
=> @panic("TODO resolve std.builtin types"),
-
else => unreachable,
- }
+ };
+ }
+
+ pub fn enumFieldCount(ty: Type) usize {
+ return ty.enumFields().count();
}
pub fn enumFieldName(ty: Type, field_index: usize) []const u8 {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.fields.keys()[field_index];
- },
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.fields.keys()[field_index];
- },
- .enum_numbered => {
- const enum_numbered = ty.castTag(.enum_numbered).?.data;
- return enum_numbered.fields.keys()[field_index];
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .export_options,
- .extern_options,
- => @panic("TODO resolve std.builtin types"),
- else => unreachable,
- }
+ return ty.enumFields().keys()[field_index];
}
pub fn enumFieldIndex(ty: Type, field_name: []const u8) ?usize {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.fields.getIndex(field_name);
- },
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.fields.getIndex(field_name);
- },
- .enum_numbered => {
- const enum_numbered = ty.castTag(.enum_numbered).?.data;
- return enum_numbered.fields.getIndex(field_name);
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .export_options,
- .extern_options,
- => @panic("TODO resolve std.builtin types"),
- else => unreachable,
- }
+ return ty.enumFields().getIndex(field_name);
}
/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
@@ -3376,6 +3358,7 @@ pub const Type = extern union {
.empty_struct => return .{},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveFieldTypes());
return struct_obj.fields;
},
else => unreachable,
@@ -3408,6 +3391,51 @@ pub const Type = extern union {
}
}
+ /// Supports structs and unions.
+ pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
+ switch (ty.tag()) {
+ .@"struct" => {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.status == .have_layout);
+ const is_packed = struct_obj.layout == .Packed;
+ if (is_packed) @panic("TODO packed structs");
+
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ for (struct_obj.fields.values()) |field, i| {
+ if (!field.ty.hasCodeGenBits()) continue;
+
+ const field_align = a: {
+ if (field.abi_align.tag() == .abi_align_default) {
+ break :a field.ty.abiAlignment(target);
+ } else {
+ break :a @intCast(u32, field.abi_align.toUnsignedInt());
+ }
+ };
+ big_align = @maximum(big_align, field_align);
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ if (i == index) return offset;
+ offset += field.ty.abiSize(target);
+ }
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ return offset;
+ },
+ .@"union" => return 0,
+ .union_tagged => {
+ const union_obj = ty.castTag(.union_tagged).?.data;
+ const layout = union_obj.getLayout(target, true);
+ if (layout.tag_align >= layout.payload_align) {
+ // {Tag, Payload}
+ return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+ } else {
+ // {Payload, Tag}
+ return 0;
+ }
+ },
+ else => unreachable,
+ }
+ }
+
pub fn declSrcLoc(ty: Type) Module.SrcLoc {
return declSrcLocOrNull(ty).?;
}
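
Note: structFieldOffset and abiSize now share one walk: align the running offset up to each field's alignment, record it, add the field size, and align the final offset to the largest alignment; passing index == field count yields the total size, which is how abiSize uses it above. A standalone sketch of that walk with hard-coded sizes and alignments for extern struct { a: u8, b: u32, c: u8 } (illustrative; the compiler reads these from the field types):

    const std = @import("std");

    test "offsets fall out of the align-then-add walk" {
        const sizes = [_]u64{ 1, 4, 1 };
        const aligns = [_]u32{ 1, 4, 1 };
        var offset: u64 = 0;
        var offsets: [3]u64 = undefined;
        for (sizes) |size, i| {
            offset = std.mem.alignForwardGeneric(u64, offset, aligns[i]);
            offsets[i] = offset;
            offset += size;
        }
        try std.testing.expectEqual([3]u64{ 0, 4, 8 }, offsets);
    }
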
@@ -3868,10 +3896,44 @@ pub const Type = extern union {
pub const base_tag = Tag.error_set_inferred;
base: Payload = Payload{ .tag = base_tag },
- data: struct {
+ data: Data,
+
+ pub const Data = struct {
func: *Module.Fn,
+ /// Direct additions to the inferred error set via `return error.Foo;`.
map: std.StringHashMapUnmanaged(void),
- },
+ /// Other functions with inferred error sets which this error set includes.
+ functions: std.AutoHashMapUnmanaged(*Module.Fn, void),
+ is_anyerror: bool,
+
+ pub fn addErrorSet(self: *Data, gpa: *Allocator, err_set_ty: Type) !void {
+ switch (err_set_ty.tag()) {
+ .error_set => {
+ const names = err_set_ty.castTag(.error_set).?.data.names();
+ for (names) |name| {
+ try self.map.put(gpa, name, {});
+ }
+ },
+ .error_set_single => {
+ const name = err_set_ty.castTag(.error_set_single).?.data;
+ try self.map.put(gpa, name, {});
+ },
+ .error_set_inferred => {
+ const func = err_set_ty.castTag(.error_set_inferred).?.data.func;
+ try self.functions.put(gpa, func, {});
+ var it = func.owner_decl.ty.fnReturnType().errorUnionSet()
+ .castTag(.error_set_inferred).?.data.map.iterator();
+ while (it.next()) |entry| {
+ try self.map.put(gpa, entry.key_ptr.*, {});
+ }
+ },
+ .anyerror => {
+ self.is_anyerror = true;
+ },
+ else => unreachable,
+ }
+ }
+ };
};
pub const Pointer = struct {
@@ -3959,6 +4021,7 @@ pub const Type = extern union {
pub const @"u8" = initTag(.u8);
pub const @"bool" = initTag(.bool);
pub const @"usize" = initTag(.usize);
+ pub const @"isize" = initTag(.isize);
pub const @"comptime_int" = initTag(.comptime_int);
pub const @"void" = initTag(.void);
pub const @"type" = initTag(.type);
@@ -4003,7 +4066,7 @@ pub const Type = extern union {
) Allocator.Error!Type {
if (elem_type.eql(Type.u8)) {
if (sent) |some| {
- if (some.eql(Value.initTag(.zero), elem_type)) {
+ if (some.eql(Value.zero, elem_type)) {
return Tag.array_u8_sentinel_0.create(arena, len);
}
} else {
diff --git a/src/value.zig b/src/value.zig
index 127c6c27b4..19c2a73666 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -86,6 +86,8 @@ pub const Value = extern union {
one,
void_value,
unreachable_value,
+ /// The only possible value for a particular type, which is stored externally.
+ the_only_possible_value,
null_value,
bool_true,
bool_false,
@@ -112,9 +114,9 @@ pub const Value = extern union {
/// This Tag will never be seen by machine codegen backends. It is changed into a
/// `decl_ref` when a comptime variable goes out of scope.
decl_ref_mut,
- /// Pointer to a specific element of an array.
+ /// Pointer to a specific element of an array, vector or slice.
elem_ptr,
- /// Pointer to a specific field of a struct.
+ /// Pointer to a specific field of a struct or union.
field_ptr,
/// A slice of u8 whose memory is managed externally.
bytes,
@@ -226,6 +228,7 @@ pub const Value = extern union {
.one,
.void_value,
.unreachable_value,
+ .the_only_possible_value,
.empty_struct_value,
.empty_array,
.null_value,
@@ -415,6 +418,7 @@ pub const Value = extern union {
.one,
.void_value,
.unreachable_value,
+ .the_only_possible_value,
.empty_array,
.null_value,
.bool_true,
@@ -510,7 +514,9 @@ pub const Value = extern union {
.base = payload.base,
.data = try arena.alloc(Value, payload.data.len),
};
- std.mem.copy(Value, new_payload.data, payload.data);
+ for (new_payload.data) |*elem, i| {
+ elem.* = try payload.data[i].copy(arena);
+ }
return Value{ .ptr_otherwise = &new_payload.base };
},
.slice => {
@@ -664,6 +670,7 @@ pub const Value = extern union {
.one => return out_stream.writeAll("1"),
.void_value => return out_stream.writeAll("{}"),
.unreachable_value => return out_stream.writeAll("unreachable"),
+ .the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
.bool_true => return out_stream.writeAll("true"),
.bool_false => return out_stream.writeAll("false"),
.ty => return val.castTag(.ty).?.data.format("", options, out_stream),
@@ -755,6 +762,8 @@ pub const Value = extern union {
const decl_val = try decl.value();
return decl_val.toAllocatedBytes(decl.ty, allocator);
},
+ .the_only_possible_value => return &[_]u8{},
+ .slice => return toAllocatedBytes(val.castTag(.slice).?.data.ptr, ty, allocator),
else => unreachable,
}
}
@@ -847,53 +856,63 @@ pub const Value = extern union {
// TODO should `@intToEnum` do this `@intCast` for you?
return @intToEnum(E, @intCast(@typeInfo(E).Enum.tag_type, field_index));
},
+ .the_only_possible_value => {
+ const fields = std.meta.fields(E);
+ assert(fields.len == 1);
+ return @intToEnum(E, fields[0].value);
+ },
else => unreachable,
}
}
pub fn enumToInt(val: Value, ty: Type, buffer: *Payload.U64) Value {
- if (val.castTag(.enum_field_index)) |enum_field_payload| {
- const field_index = enum_field_payload.data;
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- return enum_full.values.keys()[field_index];
- } else {
- // Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
- }
- },
- .enum_numbered => {
- const enum_obj = ty.castTag(.enum_numbered).?.data;
- if (enum_obj.values.count() != 0) {
- return enum_obj.values.keys()[field_index];
- } else {
- // Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
- }
- },
- .enum_simple => {
+ const field_index = switch (val.tag()) {
+ .enum_field_index => val.castTag(.enum_field_index).?.data,
+ .the_only_possible_value => blk: {
+ assert(ty.enumFieldCount() == 1);
+ break :blk 0;
+ },
+ // Assume it is already an integer and return it directly.
+ else => return val,
+ };
+
+ switch (ty.tag()) {
+ .enum_full, .enum_nonexhaustive => {
+ const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
+ if (enum_full.values.count() != 0) {
+ return enum_full.values.keys()[field_index];
+ } else {
// Field index and integer values are the same.
buffer.* = .{
.base = .{ .tag = .int_u64 },
.data = field_index,
};
return Value.initPayload(&buffer.base);
- },
- else => unreachable,
- }
+ }
+ },
+ .enum_numbered => {
+ const enum_obj = ty.castTag(.enum_numbered).?.data;
+ if (enum_obj.values.count() != 0) {
+ return enum_obj.values.keys()[field_index];
+ } else {
+ // Field index and integer values are the same.
+ buffer.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = field_index,
+ };
+ return Value.initPayload(&buffer.base);
+ }
+ },
+ .enum_simple => {
+ // Field index and integer values are the same.
+ buffer.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = field_index,
+ };
+ return Value.initPayload(&buffer.base);
+ },
+ else => unreachable,
}
- // Assume it is already an integer and return it directly.
- return val;
}
/// Asserts the value is an integer.
@@ -901,6 +920,7 @@ pub const Value = extern union {
switch (self.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value, // i0, u0
=> return BigIntMutable.init(&space.limbs, 0).toConst(),
.one,
@@ -917,32 +937,40 @@ pub const Value = extern union {
}
}
- /// Asserts the value is an integer and it fits in a u64
- pub fn toUnsignedInt(self: Value) u64 {
- switch (self.tag()) {
+    /// If the value fits in a u64, returns it; otherwise returns null.
+ /// Asserts not undefined.
+ pub fn getUnsignedInt(val: Value) ?u64 {
+ switch (val.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value, // i0, u0
=> return 0,
.one,
.bool_true,
=> return 1,
- .int_u64 => return self.castTag(.int_u64).?.data,
- .int_i64 => return @intCast(u64, self.castTag(.int_i64).?.data),
- .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().to(u64) catch unreachable,
- .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().to(u64) catch unreachable,
+ .int_u64 => return val.castTag(.int_u64).?.data,
+ .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data),
+ .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null,
+ .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
.undef => unreachable,
- else => unreachable,
+ else => return null,
}
}
+ /// Asserts the value is an integer and it fits in a u64
+ pub fn toUnsignedInt(val: Value) u64 {
+ return getUnsignedInt(val).?;
+ }
+
/// Asserts the value is an integer and it fits in a i64
pub fn toSignedInt(self: Value) i64 {
switch (self.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value, // i0, u0
=> return 0,
.one,
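
Note: splitting the fallible query (getUnsignedInt) from the asserting accessor (toUnsignedInt) lets callers probe for a small integer without hitting unreachable on values that don't fit. An analogous split on a toy type, since Value itself cannot be reproduced here:

    const std = @import("std");

    const Toy = union(enum) {
        small: u64,
        big: void,

        fn getUnsignedInt(self: Toy) ?u64 {
            return switch (self) {
                .small => |x| x,
                .big => null,
            };
        }

        // Asserts the value fits, mirroring Value.toUnsignedInt.
        fn toUnsignedInt(self: Toy) u64 {
            return self.getUnsignedInt().?;
        }
    };

    test "probe first, assert only when guaranteed" {
        const v = Toy{ .small = 42 };
        try std.testing.expectEqual(@as(?u64, 42), v.getUnsignedInt());
        try std.testing.expectEqual(@as(u64, 42), v.toUnsignedInt());
    }
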
@@ -993,6 +1021,14 @@ pub const Value = extern union {
const bits = ty.intInfo(target).bits;
bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
},
+ .Enum => {
+ var enum_buffer: Payload.U64 = undefined;
+ const int_val = val.enumToInt(ty, &enum_buffer);
+ var bigint_buffer: BigIntSpace = undefined;
+ const bigint = int_val.toBigInt(&bigint_buffer);
+ const bits = ty.intInfo(target).bits;
+ bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
+ },
.Float => switch (ty.floatBits(target)) {
16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer),
@@ -1000,6 +1036,19 @@ pub const Value = extern union {
128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer),
else => unreachable,
},
+ .Array, .Vector => {
+ const len = ty.arrayLen();
+ const elem_ty = ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ var elem_i: usize = 0;
+ var elem_value_buf: ElemValueBuffer = undefined;
+ var buf_off: usize = 0;
+ while (elem_i < len) : (elem_i += 1) {
+ const elem_val = val.elemValueBuffer(elem_i, &elem_value_buf);
+ writeToMemory(elem_val, elem_ty, target, buffer[buf_off..]);
+ buf_off += elem_size;
+ }
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1124,6 +1173,11 @@ pub const Value = extern union {
@panic("TODO implement int_big_negative Value clz");
},
+ .the_only_possible_value => {
+ assert(ty_bits == 0);
+ return ty_bits;
+ },
+
else => unreachable,
}
}
@@ -1134,6 +1188,7 @@ pub const Value = extern union {
switch (self.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value,
=> return 0,
.one,
@@ -1213,6 +1268,11 @@ pub const Value = extern union {
else => unreachable,
},
+ .the_only_possible_value => {
+ assert(ty.intInfo(target).bits == 0);
+ return true;
+ },
+
else => unreachable,
}
}
@@ -1251,7 +1311,7 @@ pub const Value = extern union {
/// Asserts the value is numeric
pub fn isZero(self: Value) bool {
return switch (self.tag()) {
- .zero => true,
+ .zero, .the_only_possible_value => true,
.one => false,
.int_u64 => self.castTag(.int_u64).?.data == 0,
@@ -1272,6 +1332,7 @@ pub const Value = extern union {
return switch (lhs.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value,
=> .eq,
.one,
@@ -1354,7 +1415,7 @@ pub const Value = extern union {
assert(b_tag != .undef);
if (a_tag == b_tag) {
switch (a_tag) {
- .void_value, .null_value => return true,
+ .void_value, .null_value, .the_only_possible_value => return true,
.enum_literal => {
const a_name = a.castTag(.enum_literal).?.data;
const b_name = b.castTag(.enum_literal).?.data;
@@ -1371,6 +1432,16 @@ pub const Value = extern union {
var buffer: Type.Payload.ElemType = undefined;
return eql(a_payload, b_payload, ty.optionalChild(&buffer));
},
+ .slice => {
+ const a_payload = a.castTag(.slice).?.data;
+ const b_payload = b.castTag(.slice).?.data;
+ if (!eql(a_payload.len, b_payload.len, Type.usize)) return false;
+
+ var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
+
+ return eql(a_payload.ptr, b_payload.ptr, ptr_ty);
+ },
.elem_ptr => @panic("TODO: Implement more pointer eql cases"),
.field_ptr => @panic("TODO: Implement more pointer eql cases"),
.eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
@@ -1444,6 +1515,14 @@ pub const Value = extern union {
.variable,
=> std.hash.autoHash(hasher, val.pointerDecl().?),
+ .slice => {
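+ // Hash both fields so that hash() remains consistent with the slice case in eql().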
+ const slice = val.castTag(.slice).?.data;
+ var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
+ hash(slice.ptr, ptr_ty, hasher);
+ hash(slice.len, Type.usize, hasher);
+ },
+
.elem_ptr => @panic("TODO: Implement more pointer hashing cases"),
.field_ptr => @panic("TODO: Implement more pointer hashing cases"),
.eu_payload_ptr => @panic("TODO: Implement more pointer hashing cases"),
@@ -1555,60 +1634,6 @@ pub const Value = extern union {
}
};
- /// Asserts the value is a pointer and dereferences it.
- /// Returns error.AnalysisFail if the pointer points to a Decl that failed semantic analysis.
- pub fn pointerDeref(val: Value, arena: *Allocator) error{ AnalysisFail, OutOfMemory }!?Value {
- const sub_val: Value = switch (val.tag()) {
- .decl_ref_mut => sub_val: {
- // The decl whose value we are obtaining here may be overwritten with
- // a different value, which would invalidate this memory. So we must
- // copy here.
- const sub_val = try val.castTag(.decl_ref_mut).?.data.decl.value();
- break :sub_val try sub_val.copy(arena);
- },
- .decl_ref => try val.castTag(.decl_ref).?.data.value(),
- .elem_ptr => blk: {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- const array_val = (try elem_ptr.array_ptr.pointerDeref(arena)) orelse return null;
- break :blk try array_val.elemValue(arena, elem_ptr.index);
- },
- .field_ptr => blk: {
- const field_ptr = val.castTag(.field_ptr).?.data;
- const container_val = (try field_ptr.container_ptr.pointerDeref(arena)) orelse return null;
- break :blk try container_val.fieldValue(arena, field_ptr.field_index);
- },
- .eu_payload_ptr => blk: {
- const err_union_ptr = val.castTag(.eu_payload_ptr).?.data;
- const err_union_val = (try err_union_ptr.pointerDeref(arena)) orelse return null;
- break :blk err_union_val.castTag(.eu_payload).?.data;
- },
- .opt_payload_ptr => blk: {
- const opt_ptr = val.castTag(.opt_payload_ptr).?.data;
- const opt_val = (try opt_ptr.pointerDeref(arena)) orelse return null;
- break :blk opt_val.castTag(.opt_payload).?.data;
- },
-
- .zero,
- .one,
- .int_u64,
- .int_i64,
- .int_big_positive,
- .int_big_negative,
- .variable,
- .extern_fn,
- .function,
- => return null,
-
- else => unreachable,
- };
- if (sub_val.tag() == .variable) {
- // This would be loading a runtime value at compile-time so we return
- // the indicator that this pointer dereference requires being done at runtime.
- return null;
- }
- return sub_val;
- }
-
pub fn isComptimeMutablePtr(val: Value) bool {
return switch (val.tag()) {
.decl_ref_mut => true,
@@ -1706,6 +1731,9 @@ pub const Value = extern union {
.decl_ref => return val.castTag(.decl_ref).?.data.val.elemValueAdvanced(index, arena, buffer),
.decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.val.elemValueAdvanced(index, arena, buffer),
+ // The child type of an array that has only one possible value must itself have only one possible value.
+ .the_only_possible_value => return val,
+
else => unreachable,
}
}
@@ -1722,6 +1750,8 @@ pub const Value = extern union {
// TODO assert the tag is correct
return payload.val;
},
+ // A struct that has only one possible value must consist of fields that each have only one possible value.
+ .the_only_possible_value => return val,
else => unreachable,
}
@@ -1737,29 +1767,54 @@ pub const Value = extern union {
/// Returns a pointer to the element value at the index.
pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value {
- if (self.castTag(.elem_ptr)) |elem_ptr| {
- return Tag.elem_ptr.create(allocator, .{
- .array_ptr = elem_ptr.data.array_ptr,
- .index = elem_ptr.data.index + index,
- });
+ switch (self.tag()) {
+ .elem_ptr => {
+ const elem_ptr = self.castTag(.elem_ptr).?.data;
+ return Tag.elem_ptr.create(allocator, .{
+ .array_ptr = elem_ptr.array_ptr,
+ .index = elem_ptr.index + index,
+ });
+ },
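+ // For a slice base, element pointers index off the slice's backing pointer.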
+ .slice => return Tag.elem_ptr.create(allocator, .{
+ .array_ptr = self.castTag(.slice).?.data.ptr,
+ .index = index,
+ }),
+ else => return Tag.elem_ptr.create(allocator, .{
+ .array_ptr = self,
+ .index = index,
+ }),
}
-
- return Tag.elem_ptr.create(allocator, .{
- .array_ptr = self,
- .index = index,
- });
}
pub fn isUndef(self: Value) bool {
return self.tag() == .undef;
}
- /// Valid for all types. Asserts the value is not undefined and not unreachable.
+ /// Asserts the value is not undefined and not unreachable.
+ /// The integer value 0 is considered null, to accommodate C pointers.
pub fn isNull(self: Value) bool {
return switch (self.tag()) {
.null_value => true,
.opt_payload => false,
+ // If it's not one of those two tags then it must be a C pointer value,
+ // in which case the value 0 is null and other values are non-null.
+
+ .zero,
+ .bool_false,
+ .the_only_possible_value,
+ => true,
+
+ .one,
+ .bool_true,
+ => false,
+
+ .int_u64,
+ .int_i64,
+ .int_big_positive,
+ .int_big_negative,
+ => compareWithZero(self, .eq),
+
.undef => unreachable,
.unreachable_value => unreachable,
.inferred_alloc => unreachable,
@@ -1817,16 +1872,26 @@ pub const Value = extern union {
};
}
- pub fn intToFloat(val: Value, allocator: *Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn intToFloat(val: Value, arena: *Allocator, dest_ty: Type, target: Target) !Value {
switch (val.tag()) {
.undef, .zero, .one => return val,
+ .the_only_possible_value => return Value.initTag(.zero), // for i0, u0
.int_u64 => {
- return intToFloatInner(val.castTag(.int_u64).?.data, allocator, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_u64).?.data, arena, dest_ty, target);
},
.int_i64 => {
- return intToFloatInner(val.castTag(.int_i64).?.data, allocator, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_i64).?.data, arena, dest_ty, target);
+ },
+ .int_big_positive => {
+ const limbs = val.castTag(.int_big_positive).?.data;
+ const float = bigIntToFloat(limbs, true);
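+ // Convert to f128 first; floatToValue then narrows to the destination float type.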
+ return floatToValue(float, arena, dest_ty, target);
+ },
+ .int_big_negative => {
+ const limbs = val.castTag(.int_big_negative).?.data;
+ const float = bigIntToFloat(limbs, false);
+ return floatToValue(float, arena, dest_ty, target);
},
- .int_big_positive, .int_big_negative => @panic("big int to float"),
else => unreachable,
}
}
@@ -1841,6 +1906,16 @@ pub const Value = extern union {
}
}
+ fn floatToValue(float: f128, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+ switch (dest_ty.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
+ 32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
+ 64 => return Value.Tag.float_64.create(arena, @floatCast(f64, float)),
+ 128 => return Value.Tag.float_128.create(arena, float),
+ else => unreachable,
+ }
+ }
+
/// Supports both floats and ints; handles undefined.
pub fn numberAddWrap(
lhs: Value,
@@ -2081,6 +2156,32 @@ pub const Value = extern union {
};
}
+ /// Operands must be integers; handles undefined.
+ pub fn bitwiseNot(val: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+ if (val.isUndef()) return Value.initTag(.undef);
+
+ const info = ty.intInfo(target);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var val_space: Value.BigIntSpace = undefined;
+ const val_bigint = val.toBigInt(&val_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcTwosCompLimbCount(info.bits),
+ );
+
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
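+ // bitNotWrap flips every bit and wraps the result to info.bits, respecting the type's signedness.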
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
/// Operands must be integers; handles undefined.
pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -2093,7 +2194,8 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ // +1 extra limb: results involving negative operands can need one more limb than either input.
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
@@ -2157,7 +2259,8 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ // +1 extra limb: results involving negative operands can need one more limb than either input.
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
@@ -2223,19 +2326,50 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
+ rhs_bigint.limbs.len,
+ );
+ const limbs_buffer = try allocator.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+ var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
+ const result_limbs = result_q.limbs[0..result_q.len];
+
+ if (result_q.positive) {
+ return Value.Tag.int_big_positive.create(allocator, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(allocator, result_limbs);
+ }
+ }
+
+ pub fn intDivFloor(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs_q = try allocator.alloc(
+ std.math.big.Limb,
lhs_bigint.limbs.len,
);
+ const limbs_r = try allocator.alloc(
+ std.math.big.Limb,
+ rhs_bigint.limbs.len,
+ );
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
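+ // divFloor rounds the quotient toward negative infinity; it differs from divTrunc only when
+ // lhs and rhs have opposite signs and the remainder is nonzero.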
const result_limbs = result_q.limbs[0..result_q.len];
if (result_q.positive) {
@@ -2254,11 +2388,13 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len,
+ // TODO: consider reworking Sema to re-use Values rather than
+ // always producing new Value objects.
+ rhs_bigint.limbs.len,
);
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
@@ -2266,7 +2402,7 @@ pub const Value = extern union {
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
const result_limbs = result_r.limbs[0..result_r.len];
if (result_r.positive) {
@@ -2285,11 +2421,11 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len,
+ rhs_bigint.limbs.len,
);
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
@@ -2297,7 +2433,7 @@ pub const Value = extern union {
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
const result_limbs = result_r.limbs[0..result_r.len];
if (result_r.positive) {
@@ -2555,6 +2691,68 @@ pub const Value = extern union {
}
}
+ pub fn floatDivFloor(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: *Allocator,
+ ) !Value {
+ switch (float_type.tag()) {
+ .f16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ .f32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ .f64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ .f128, .comptime_float, .c_longdouble => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
+ }
+
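+ // Illustrative contrast between the two rounding modes: @divFloor(-7.0, 2.0) == -4.0,
+ // while @divTrunc(-7.0, 2.0) == -3.0.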
+ pub fn floatDivTrunc(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: *Allocator,
+ ) !Value {
+ switch (float_type.tag()) {
+ .f16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ .f32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ .f64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ .f128, .comptime_float, .c_longdouble => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
+ }
+
pub fn floatMul(
lhs: Value,
rhs: Value,
@@ -2798,6 +2996,7 @@ pub const Value = extern union {
pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base };
pub const undef = initTag(.undef);
pub const @"void" = initTag(.void_value);
+ pub const @"null" = initTag(.null_value);
};
var negative_one_payload: Value.Payload.I64 = .{
diff --git a/src/zig_clang.cpp b/src/zig_clang.cpp
index f5b04ddd9d..deefc04c0a 100644
--- a/src/zig_clang.cpp
+++ b/src/zig_clang.cpp
@@ -2120,6 +2120,11 @@ bool ZigClangFunctionDecl_isInlineSpecified(const struct ZigClangFunctionDecl *s
return casted->isInlineSpecified();
}
+bool ZigClangFunctionDecl_hasAlwaysInlineAttr(const struct ZigClangFunctionDecl *self) {
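+    // Reports whether the declaration carries Clang's AlwaysInlineAttr
+    // (e.g. __attribute__((always_inline))).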
+ auto casted = reinterpret_cast<const clang::FunctionDecl *>(self);
+ return casted->hasAttr<clang::AlwaysInlineAttr>();
+}
+
const char* ZigClangFunctionDecl_getSectionAttribute(const struct ZigClangFunctionDecl *self, size_t *len) {
auto casted = reinterpret_cast<const clang::FunctionDecl *>(self);
if (const clang::SectionAttr *SA = casted->getAttr<clang::SectionAttr>()) {
diff --git a/src/zig_clang.h b/src/zig_clang.h
index f704b50b18..af44e51cdd 100644
--- a/src/zig_clang.h
+++ b/src/zig_clang.h
@@ -1111,6 +1111,7 @@ ZIG_EXTERN_C bool ZigClangFunctionDecl_doesDeclarationForceExternallyVisibleDefi
ZIG_EXTERN_C bool ZigClangFunctionDecl_isThisDeclarationADefinition(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C bool ZigClangFunctionDecl_doesThisDeclarationHaveABody(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C bool ZigClangFunctionDecl_isInlineSpecified(const struct ZigClangFunctionDecl *);
+ZIG_EXTERN_C bool ZigClangFunctionDecl_hasAlwaysInlineAttr(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C bool ZigClangFunctionDecl_isDefined(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C const struct ZigClangFunctionDecl* ZigClangFunctionDecl_getDefinition(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C const char* ZigClangFunctionDecl_getSectionAttribute(const struct ZigClangFunctionDecl *, size_t *);