author    Veikka Tuominen <git@vexu.eu>  2022-11-21 13:48:23 +0200
committer GitHub <noreply@github.com>    2022-11-21 13:48:23 +0200
commit    7c527c6dfe590a1251b51f6e1cfe8a4ba9bb0d67 (patch)
tree      2bf79b09302f3c574b18b50591fdad9a555b6e8f /src
parent    89c374cd2dff36477ac1513006c03721ef946a2a (diff)
parent    bbcd959c2b1721d81af5829b56e01cc472b58816 (diff)
Merge pull request #13585 from Vexu/stage2-fixes
Stage2 bug fixes
Diffstat (limited to 'src')
 -rw-r--r--  src/Module.zig                 |   8
 -rw-r--r--  src/Sema.zig                   |  98
 -rw-r--r--  src/codegen/llvm.zig           | 121
 -rw-r--r--  src/codegen/llvm/bindings.zig  |   4
 -rw-r--r--  src/main.zig                   |   6
 -rw-r--r--  src/print_air.zig              |   6
 -rw-r--r--  src/print_zir.zig              |  18
 -rw-r--r--  src/type.zig                   |  81
 -rw-r--r--  src/value.zig                  |  12
 -rw-r--r--  src/zig_llvm.cpp               |  30
 -rw-r--r--  src/zig_llvm.h                 |   2
 11 files changed, 299 insertions, 87 deletions
diff --git a/src/Module.zig b/src/Module.zig
index af29a591cc..d598993c3f 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -1044,7 +1044,8 @@ pub const Struct = struct {
.root => return queryFieldSrc(tree.*, query, file, tree.containerDeclRoot()),
- else => unreachable,
+ // This struct was generated using @Type
+ else => return s.srcLoc(mod),
}
}
@@ -1270,7 +1271,8 @@ pub const Union = struct {
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.taggedUnionEnumTag(node)),
- else => unreachable,
+ // This union was generated using @Type
+ else => return u.srcLoc(mod),
}
}
@@ -4631,7 +4633,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 };
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
- const decl_tv = try sema.resolveInstValue(&block_scope, init_src, result_ref, undefined);
+ const decl_tv = try sema.resolveInstValue(&block_scope, init_src, result_ref, "global variable initializer must be comptime-known");
// Note this resolves the type of the Decl, not the value; if this Decl
// is a struct, for example, this resolves `type` (which needs no resolution),
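
[Illustrative commentary, not part of the diff: the first two hunks let field-source-location queries fall back to the type's own srcLoc when the struct or union was reified with @Type and so has no container declaration in any syntax tree, and the semaDecl hunk supplies a real reason string instead of `undefined`. A hedged sketch of code that would surface the new message; the exact diagnostic wording beyond the string in the hunk is an assumption.]

    // Global variable initializers are comptime-evaluated; referencing a
    // runtime-only value in one should now report the reason string added above.
    extern var runtime_only: u32;
    var global: u32 = runtime_only; // error: global variable initializer must be comptime-known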
diff --git a/src/Sema.zig b/src/Sema.zig
index a6811d37fd..a1c7fa7b91 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -128,7 +128,7 @@ pub const Block = struct {
/// Shared among all child blocks.
sema: *Sema,
/// The namespace to use for lookups from this source block
- /// When analyzing fields, this is different from src_decl.src_namepsace.
+ /// When analyzing fields, this is different from src_decl.src_namespace.
namespace: *Namespace,
/// The AIR instructions generated for this block.
instructions: std.ArrayListUnmanaged(Air.Inst.Index),
@@ -1897,10 +1897,15 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
}
i -= Air.Inst.Ref.typed_value_map.len;
+ const air_tags = sema.air_instructions.items(.tag);
if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
+ if (air_tags[i] == .constant) {
+ const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
+ const val = sema.air_values.items[ty_pl.payload];
+ if (val.tag() == .variable) return val;
+ }
return opv;
}
- const air_tags = sema.air_instructions.items(.tag);
switch (air_tags[i]) {
.constant => {
const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
@@ -4106,6 +4111,7 @@ fn validateStructInit(
.{fqn},
);
}
+ root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
@@ -4225,7 +4231,6 @@ fn validateStructInit(
}
if (root_msg) |msg| {
- root_msg = null;
if (struct_ty.castTag(.@"struct")) |struct_obj| {
const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
@@ -4236,6 +4241,7 @@ fn validateStructInit(
.{fqn},
);
}
+ root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
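
[Illustrative commentary: these `root_msg = null;` moves keep the errdefer responsible for freeing the message until the last fallible call has succeeded; ownership is released only immediately before failWithOwnedErrorMsg takes it. A minimal sketch of the pattern, reusing names from the surrounding code; the note text is invented for illustration.]

    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(gpa);

    // ... notes may be appended to root_msg by fallible calls here ...

    if (root_msg) |msg| {
        // Fallible work first: if errNote fails, the errdefer still frees msg.
        try sema.errNote(block, init_src, msg, "struct '{s}' declared here", .{fqn});
        // Only now give up ownership, right before handing msg off.
        root_msg = null;
        return sema.failWithOwnedErrorMsg(msg);
    }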
@@ -4283,29 +4289,42 @@ fn zirValidateArrayInit(
const array_ty = sema.typeOf(array_ptr).childType();
const array_len = array_ty.arrayLen();
- if (instrs.len != array_len and array_ty.isTuple()) {
- const struct_obj = array_ty.castTag(.tuple).?.data;
- var root_msg: ?*Module.ErrorMsg = null;
- errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
+ if (instrs.len != array_len) switch (array_ty.zigTypeTag()) {
+ .Struct => {
+ const struct_obj = array_ty.castTag(.tuple).?.data;
+ var root_msg: ?*Module.ErrorMsg = null;
+ errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (struct_obj.values) |default_val, i| {
- if (i < instrs.len) continue;
+ for (struct_obj.values) |default_val, i| {
+ if (i < instrs.len) continue;
- if (default_val.tag() == .unreachable_value) {
- const template = "missing tuple field with index {d}";
- if (root_msg) |msg| {
- try sema.errNote(block, init_src, msg, template, .{i});
- } else {
- root_msg = try sema.errMsg(block, init_src, template, .{i});
+ if (default_val.tag() == .unreachable_value) {
+ const template = "missing tuple field with index {d}";
+ if (root_msg) |msg| {
+ try sema.errNote(block, init_src, msg, template, .{i});
+ } else {
+ root_msg = try sema.errMsg(block, init_src, template, .{i});
+ }
}
}
- }
- if (root_msg) |msg| {
- root_msg = null;
- return sema.failWithOwnedErrorMsg(msg);
- }
- }
+ if (root_msg) |msg| {
+ root_msg = null;
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+ },
+ .Array => {
+ return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{
+ array_len, instrs.len,
+ });
+ },
+ .Vector => {
+ return sema.fail(block, init_src, "expected {d} vector elements; found {d}", .{
+ array_len, instrs.len,
+ });
+ },
+ else => unreachable,
+ };
if ((is_comptime or block.is_comptime) and
(try sema.resolveDefinedValue(block, init_src, array_ptr)) != null)
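
[Illustrative commentary: with the switch above, a wrong element count in an array or vector initializer now gets a direct error instead of falling through, while the tuple branch keeps its per-field notes. A hedged user-level example of the new messages; the exact counts reported follow the format strings in the hunk.]

    const a: [3]u8 = .{ 1, 2 };               // error: expected 3 array elements; found 2
    const v: @Vector(4, f32) = .{ 1.0, 2.0 }; // error: expected 4 vector elements; found 2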
@@ -17080,7 +17099,6 @@ fn finishStructInit(
}
if (root_msg) |msg| {
- root_msg = null;
if (struct_ty.castTag(.@"struct")) |struct_obj| {
const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
@@ -17091,6 +17109,7 @@ fn finishStructInit(
.{fqn},
);
}
+ root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
@@ -18778,8 +18797,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_ty = try sema.resolveType(block, src, extra.lhs);
- const elem_ty = ptr_ty.elemType2();
try sema.checkPtrType(block, type_src, ptr_ty);
+ const elem_ty = ptr_ty.elemType2();
const target = sema.mod.getTarget();
const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema);
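
[Illustrative commentary: moving checkPtrType above elemType2 means a non-pointer destination type is rejected cleanly before elemType2 is asked for a child type it does not have. A hedged example of the misuse this guards against; the exact error wording is an assumption.]

    const p = @intToPtr(u32, 0x1000); // error: expected pointer type, found 'u32'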
@@ -24307,7 +24326,10 @@ fn coerceExtra(
},
.Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) {
.Float, .ComptimeFloat => float: {
- const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse {
+ if (is_undef) {
+ return sema.addConstUndef(dest_ty);
+ }
+ const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
if (dest_ty.zigTypeTag() == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
@@ -24327,7 +24349,10 @@ fn coerceExtra(
return try sema.addConstant(dest_ty, result_val);
},
.Int, .ComptimeInt => {
- if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
+ if (is_undef) {
+ return sema.addConstUndef(dest_ty);
+ }
+ if (try sema.resolveMaybeUndefVal(inst)) |val| {
// comptime-known integer to other number
if (!(try sema.intFitsInType(val, dest_ty, null))) {
if (!opts.report_err) return error.NotCoercible;
@@ -24364,7 +24389,10 @@ fn coerceExtra(
return try sema.addConstant(dest_ty, result_val);
},
.Float => {
- if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
+ if (is_undef) {
+ return sema.addConstUndef(dest_ty);
+ }
+ if (try sema.resolveMaybeUndefVal(inst)) |val| {
const result_val = try val.floatCast(sema.arena, dest_ty, target);
if (!val.eql(result_val, dest_ty, sema.mod)) {
return sema.fail(
@@ -24389,7 +24417,10 @@ fn coerceExtra(
}
},
.Int, .ComptimeInt => int: {
- const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse {
+ if (is_undef) {
+ return sema.addConstUndef(dest_ty);
+ }
+ const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
if (dest_ty.zigTypeTag() == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
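
[Illustrative commentary: each of the coercion branches above now short-circuits an undefined operand to an undef constant of the destination type instead of demanding a defined value. A hedged comptime example for the int-to-float branch.]

    comptime {
        const i: u32 = undefined;
        const f: f32 = i; // now coerces to an undef f32 instead of failing on the undefined value
        _ = f;
    }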
@@ -26543,6 +26574,10 @@ fn beginComptimePtrLoad(
.null_value => {
return sema.fail(block, src, "attempt to use null value", .{});
},
+ .opt_payload => blk: {
+ const opt_payload = ptr_val.castTag(.opt_payload).?.data;
+ break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null);
+ },
.zero,
.one,
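
[Illustrative commentary: the new prong lets comptime pointer loads follow a pointer that targets an optional's payload. A hedged sketch of the kind of code this path serves; whether this exact snippet previously failed is an assumption.]

    comptime {
        var opt: ?u32 = 123;
        const ptr = &opt.?; // pointer into the optional's payload
        if (ptr.* != 123) @compileError("unexpected value");
    }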
@@ -27191,8 +27226,8 @@ fn coerceTupleToStruct(
}
if (root_msg) |msg| {
- root_msg = null;
try sema.addDeclaredHereNote(msg, struct_ty);
+ root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
@@ -27297,8 +27332,8 @@ fn coerceTupleToTuple(
}
if (root_msg) |msg| {
- root_msg = null;
try sema.addDeclaredHereNote(msg, tuple_ty);
+ root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
@@ -31298,7 +31333,10 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
- return ty.hasRuntimeBitsAdvanced(false, sema);
+ return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) {
+ error.NeedLazy => unreachable,
+ else => |e| return e,
+ };
}
fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index afafda36a6..8459920d61 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -988,6 +988,25 @@ pub const Object = struct {
args.appendAssumeCapacity(load_inst);
}
},
+ .byref_mut => {
+ const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_llvm_ty = try dg.lowerType(param_ty);
+ const param = llvm_func.getParam(llvm_arg_i);
+ const alignment = param_ty.abiAlignment(target);
+
+ dg.addArgAttr(llvm_func, llvm_arg_i, "noundef");
+ llvm_arg_i += 1;
+
+ try args.ensureUnusedCapacity(1);
+
+ if (isByRef(param_ty)) {
+ args.appendAssumeCapacity(param);
+ } else {
+ const load_inst = builder.buildLoad(param_llvm_ty, param, "");
+ load_inst.setAlignment(alignment);
+ args.appendAssumeCapacity(load_inst);
+ }
+ },
.abi_sized_int => {
assert(!it.byval_attr);
const param_ty = fn_info.param_types[it.zig_index - 1];
@@ -2583,6 +2602,9 @@ pub const DeclGen = struct {
const alignment = param_ty.abiAlignment(target);
dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
+ .byref_mut => {
+ dg.addArgAttr(llvm_fn, it.llvm_index - 1, "noundef");
+ },
// No attributes needed for these.
.no_bits,
.abi_sized_int,
@@ -3101,7 +3123,7 @@ pub const DeclGen = struct {
const param_ty = fn_info.param_types[it.zig_index - 1];
try llvm_params.append(try dg.lowerType(param_ty));
},
- .byref => {
+ .byref, .byref_mut => {
const param_ty = fn_info.param_types[it.zig_index - 1];
const raw_llvm_ty = try dg.lowerType(param_ty);
try llvm_params.append(raw_llvm_ty.pointerType(0));
@@ -4678,9 +4700,9 @@ pub const FuncGen = struct {
break :blk ret_ptr;
};
- if (fn_info.return_type.isError() and
- self.dg.module.comp.bin_file.options.error_return_tracing)
- {
+ const err_return_tracing = fn_info.return_type.isError() and
+ self.dg.module.comp.bin_file.options.error_return_tracing;
+ if (err_return_tracing) {
try llvm_args.append(self.err_ret_trace.?);
}
@@ -4726,6 +4748,27 @@ pub const FuncGen = struct {
try llvm_args.append(arg_ptr);
}
},
+ .byref_mut => {
+ const arg = args[it.zig_index - 1];
+ const param_ty = self.air.typeOf(arg);
+ const llvm_arg = try self.resolveInst(arg);
+
+ const alignment = param_ty.abiAlignment(target);
+ const param_llvm_ty = try self.dg.lowerType(param_ty);
+ const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
+ if (isByRef(param_ty)) {
+ const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
+ load_inst.setAlignment(alignment);
+
+ const store_inst = self.builder.buildStore(load_inst, arg_ptr);
+ store_inst.setAlignment(alignment);
+ try llvm_args.append(arg_ptr);
+ } else {
+ const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
+ store_inst.setAlignment(alignment);
+ try llvm_args.append(arg_ptr);
+ }
+ },
.abi_sized_int => {
const arg = args[it.zig_index - 1];
const param_ty = self.air.typeOf(arg);
@@ -4847,6 +4890,66 @@ pub const FuncGen = struct {
"",
);
+ if (callee_ty.zigTypeTag() == .Pointer) {
+ // Add argument attributes for function pointer calls.
+ it = iterateParamTypes(self.dg, fn_info);
+ it.llvm_index += @boolToInt(sret);
+ it.llvm_index += @boolToInt(err_return_tracing);
+ while (it.next()) |lowering| switch (lowering) {
+ .byval => {
+ const param_index = it.zig_index - 1;
+ const param_ty = fn_info.param_types[param_index];
+ if (!isByRef(param_ty)) {
+ self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
+ }
+ },
+ .byref => {
+ const param_index = it.zig_index - 1;
+ const param_ty = fn_info.param_types[param_index];
+ const param_llvm_ty = try self.dg.lowerType(param_ty);
+ const alignment = param_ty.abiAlignment(target);
+ self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
+ },
+ .byref_mut => {
+ self.dg.addArgAttr(call, it.llvm_index - 1, "noundef");
+ },
+ // No attributes needed for these.
+ .no_bits,
+ .abi_sized_int,
+ .multiple_llvm_types,
+ .as_u16,
+ .float_array,
+ .i32_array,
+ .i64_array,
+ => continue,
+
+ .slice => {
+ assert(!it.byval_attr);
+ const param_ty = fn_info.param_types[it.zig_index - 1];
+ const ptr_info = param_ty.ptrInfo().data;
+ const llvm_arg_i = it.llvm_index - 2;
+
+ if (math.cast(u5, it.zig_index - 1)) |i| {
+ if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+ self.dg.addArgAttr(call, llvm_arg_i, "noalias");
+ }
+ }
+ if (param_ty.zigTypeTag() != .Optional) {
+ self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
+ }
+ if (!ptr_info.mutable) {
+ self.dg.addArgAttr(call, llvm_arg_i, "readonly");
+ }
+ if (ptr_info.@"align" != 0) {
+ self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
+ } else {
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+ self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
+ }
+ },
+ };
+ }
+
if (return_type.isNoReturn() and attr != .AlwaysTail) {
_ = self.builder.buildUnreachable();
return null;
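
[Illustrative commentary: the block above attaches parameter attributes (noalias, nonnull, readonly, align, byval/byref) to the call instruction itself, which matters when calling through a function pointer, where there is no function declaration to carry them. A hedged user-level example of such a call; the names are invented for illustration.]

    const Big = extern struct { a: [4]u64 };

    fn takeBig(b: Big) callconv(.C) void {
        _ = b;
    }

    var call_me: *const fn (Big) callconv(.C) void = &takeBig;

    pub fn main() void {
        // Calling through a pointer: attributes now go on the call site.
        call_me(.{ .a = .{ 1, 2, 3, 4 } });
    }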
@@ -4876,7 +4979,7 @@ pub const FuncGen = struct {
// In this case the function return type is honoring the calling convention by having
// a different LLVM type than the usual one. We solve this here at the callsite
// by bitcasting a pointer to our canonical type, then loading it if necessary.
- const alignment = return_type.abiAlignment(target);
+ const alignment = self.dg.object.target_data.abiAlignmentOfType(abi_ret_ty);
const rp = self.buildAlloca(llvm_ret_ty, alignment);
const ptr_abi_ty = abi_ret_ty.pointerType(0);
const casted_ptr = self.builder.buildBitCast(rp, ptr_abi_ty, "");
@@ -10384,6 +10487,7 @@ const ParamTypeIterator = struct {
no_bits,
byval,
byref,
+ byref_mut,
abi_sized_int,
multiple_llvm_types,
slice,
@@ -10425,6 +10529,7 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
var buf: Type.Payload.ElemType = undefined;
if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) {
+ it.llvm_index += 1;
return .slice;
} else if (isByRef(ty)) {
return .byref;
@@ -10547,7 +10652,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, it.target)) {
- .memory => return .byref,
+ .memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
@@ -10578,9 +10683,7 @@ const ParamTypeIterator = struct {
return .as_u16;
}
switch (riscv_c_abi.classifyType(ty, it.target)) {
- .memory => {
- return .byref;
- },
+ .memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
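
[Illustrative commentary: on the aarch64 and riscv64 C ABIs, aggregates classified as `memory` are now lowered as `byref_mut`: the caller builds a fresh writable stack copy and passes its address, since the callee may treat that memory as scratch. A hedged example of an affected call; the struct and function are invented for illustration.]

    const Payload = extern struct { words: [6]u64 };

    fn consume(p: Payload) callconv(.C) void {
        _ = p;
    }

    pub fn main() void {
        var p: Payload = .{ .words = .{ 1, 2, 3, 4, 5, 6 } };
        // With byref_mut, the caller passes the address of a temporary copy,
        // so the callee cannot clobber `p` itself.
        consume(p);
    }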
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 1b462312cd..90d0f51c7b 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -88,8 +88,8 @@ pub const Context = opaque {
};
pub const Value = opaque {
- pub const addAttributeAtIndex = LLVMAddAttributeAtIndex;
- extern fn LLVMAddAttributeAtIndex(*Value, Idx: AttributeIndex, A: *Attribute) void;
+ pub const addAttributeAtIndex = ZigLLVMAddAttributeAtIndex;
+ extern fn ZigLLVMAddAttributeAtIndex(*Value, Idx: AttributeIndex, A: *Attribute) void;
pub const removeEnumAttributeAtIndex = LLVMRemoveEnumAttributeAtIndex;
extern fn LLVMRemoveEnumAttributeAtIndex(F: *Value, Idx: AttributeIndex, KindID: c_uint) void;
diff --git a/src/main.zig b/src/main.zig
index 410414b3a3..24518d743d 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -1304,7 +1304,11 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "--no-gc-sections")) {
linker_gc_sections = false;
} else if (mem.eql(u8, arg, "--debug-compile-errors")) {
- debug_compile_errors = true;
+ if (!crash_report.is_enabled) {
+ std.log.warn("Zig was compiled in a release mode. --debug-compile-errors has no effect.", .{});
+ } else {
+ debug_compile_errors = true;
+ }
} else if (mem.eql(u8, arg, "--verbose-link")) {
verbose_link = true;
} else if (mem.eql(u8, arg, "--verbose-cc")) {
diff --git a/src/print_air.zig b/src/print_air.zig
index 0bbc1100f7..bed6f029b1 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -400,9 +400,13 @@ const Writer = struct {
}
fn writeTyPlBin(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
- const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
+ const data = w.air.instructions.items(.data);
+ const ty_pl = data[inst].ty_pl;
const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;
+ const inst_ty = w.air.getRefType(data[inst].ty_pl.ty);
+ try w.writeType(s, inst_ty);
+ try s.writeAll(", ");
try w.writeOperand(s, inst, 0, extra.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.rhs);
diff --git a/src/print_zir.zig b/src/print_zir.zig
index f1b1068920..d434abd439 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -262,9 +262,10 @@ const Writer = struct {
=> try self.writeBreak(stream, inst),
.array_init,
.array_init_ref,
+ => try self.writeArrayInit(stream, inst),
.array_init_anon,
.array_init_anon_ref,
- => try self.writeArrayInit(stream, inst),
+ => try self.writeArrayInitAnon(stream, inst),
.slice_start => try self.writeSliceStart(stream, inst),
.slice_end => try self.writeSliceEnd(stream, inst),
@@ -2316,6 +2317,21 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
+ fn writeArrayInitAnon(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+
+ const extra = self.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = self.code.refSlice(extra.end, extra.data.operands_len);
+
+ try stream.writeAll("{");
+ for (args) |arg, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, arg);
+ }
+ try stream.writeAll("}) ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
fn writeArrayInitSent(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
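
[Illustrative commentary: the printer split mirrors the ZIR distinction between typed and anonymous array initializers; the anonymous forms carry only element operands, so they get their own writer. A hedged user-level illustration; which ZIR instruction each line actually lowers to is an assumption.]

    const typed: [2]u32 = .{ 1, 2 }; // array_init / array_init_ref shapes
    const anon = .{ 1, 2 };          // array_init_anon / array_init_anon_ref shapes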
diff --git a/src/type.zig b/src/type.zig
index 710b2fe4a3..6afee8bc73 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2312,6 +2312,8 @@ pub const Type = extern union {
}
}
+ const RuntimeBitsError = Module.CompileError || error{NeedLazy};
+
/// true if and only if the type takes up space in memory at runtime.
/// There are two reasons a type will return false:
/// * the type is a comptime-only type. For example, the type `type` itself.
@@ -2326,8 +2328,8 @@ pub const Type = extern union {
pub fn hasRuntimeBitsAdvanced(
ty: Type,
ignore_comptime_only: bool,
- opt_sema: ?*Sema,
- ) Module.CompileError!bool {
+ strat: AbiAlignmentAdvancedStrat,
+ ) RuntimeBitsError!bool {
switch (ty.tag()) {
.u1,
.u8,
@@ -2406,8 +2408,8 @@ pub const Type = extern union {
return true;
} else if (ty.childType().zigTypeTag() == .Fn) {
return !ty.childType().fnInfo().is_generic;
- } else if (opt_sema) |sema| {
- return !(try sema.typeRequiresComptime(ty));
+ } else if (strat == .sema) {
+ return !(try strat.sema.typeRequiresComptime(ty));
} else {
return !comptimeOnly(ty);
}
@@ -2445,8 +2447,8 @@ pub const Type = extern union {
}
if (ignore_comptime_only) {
return true;
- } else if (opt_sema) |sema| {
- return !(try sema.typeRequiresComptime(child_ty));
+ } else if (strat == .sema) {
+ return !(try strat.sema.typeRequiresComptime(child_ty));
} else {
return !comptimeOnly(child_ty);
}
@@ -2459,13 +2461,14 @@ pub const Type = extern union {
// and then later if our guess was incorrect, we emit a compile error.
return true;
}
- if (opt_sema) |sema| {
- _ = try sema.resolveTypeFields(ty);
+ switch (strat) {
+ .sema => |sema| _ = try sema.resolveTypeFields(ty),
+ .eager => assert(struct_obj.haveFieldTypes()),
+ .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy,
}
- assert(struct_obj.haveFieldTypes());
for (struct_obj.fields.values()) |field| {
if (field.is_comptime) continue;
- if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema))
+ if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -2474,7 +2477,7 @@ pub const Type = extern union {
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
- return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema);
+ return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat);
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
@@ -2483,17 +2486,18 @@ pub const Type = extern union {
.enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&buffer);
- return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema);
+ return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat);
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
- if (opt_sema) |sema| {
- _ = try sema.resolveTypeFields(ty);
+ switch (strat) {
+ .sema => |sema| _ = try sema.resolveTypeFields(ty),
+ .eager => assert(union_obj.haveFieldTypes()),
+ .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
}
- assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
- if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema))
+ if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -2501,16 +2505,17 @@ pub const Type = extern union {
},
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema)) {
+ if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) {
return true;
}
- if (opt_sema) |sema| {
- _ = try sema.resolveTypeFields(ty);
+ switch (strat) {
+ .sema => |sema| _ = try sema.resolveTypeFields(ty),
+ .eager => assert(union_obj.haveFieldTypes()),
+ .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
}
- assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
- if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema))
+ if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -2518,9 +2523,9 @@ pub const Type = extern union {
},
.array, .vector => return ty.arrayLen() != 0 and
- try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema),
+ try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat),
.array_u8 => return ty.arrayLen() != 0,
- .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema),
+ .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat),
.int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0,
@@ -2529,7 +2534,7 @@ pub const Type = extern union {
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
- if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, opt_sema)) return true;
+ if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true;
}
return false;
},
@@ -2665,11 +2670,11 @@ pub const Type = extern union {
}
pub fn hasRuntimeBits(ty: Type) bool {
- return hasRuntimeBitsAdvanced(ty, false, null) catch unreachable;
+ return hasRuntimeBitsAdvanced(ty, false, .eager) catch unreachable;
}
pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool {
- return hasRuntimeBitsAdvanced(ty, true, null) catch unreachable;
+ return hasRuntimeBitsAdvanced(ty, true, .eager) catch unreachable;
}
pub fn isFnOrHasRuntimeBits(ty: Type) bool {
@@ -2812,12 +2817,12 @@ pub const Type = extern union {
}
}
- const AbiAlignmentAdvanced = union(enum) {
+ pub const AbiAlignmentAdvanced = union(enum) {
scalar: u32,
val: Value,
};
- const AbiAlignmentAdvancedStrat = union(enum) {
+ pub const AbiAlignmentAdvancedStrat = union(enum) {
eager,
lazy: Allocator,
sema: *Sema,
@@ -2971,7 +2976,10 @@ pub const Type = extern union {
switch (strat) {
.eager, .sema => {
- if (!(try child_type.hasRuntimeBitsAdvanced(false, opt_sema))) {
+ if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ else => |e| return e,
+ })) {
return AbiAlignmentAdvanced{ .scalar = 1 };
}
return child_type.abiAlignmentAdvanced(target, strat);
@@ -2990,7 +2998,10 @@ pub const Type = extern union {
const code_align = abiAlignment(Type.anyerror, target);
switch (strat) {
.eager, .sema => {
- if (!(try data.payload.hasRuntimeBitsAdvanced(false, opt_sema))) {
+ if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ else => |e| return e,
+ })) {
return AbiAlignmentAdvanced{ .scalar = code_align };
}
return AbiAlignmentAdvanced{ .scalar = @max(
@@ -3044,7 +3055,10 @@ pub const Type = extern union {
const fields = ty.structFields();
var big_align: u32 = 0;
for (fields.values()) |field| {
- if (!(try field.ty.hasRuntimeBitsAdvanced(false, opt_sema))) continue;
+ if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ else => |e| return e,
+ })) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
@@ -3161,7 +3175,10 @@ pub const Type = extern union {
var max_align: u32 = 0;
if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target);
for (union_obj.fields.values()) |field| {
- if (!(try field.ty.hasRuntimeBitsAdvanced(false, opt_sema))) continue;
+ if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ else => |e| return e,
+ })) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
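
[Illustrative commentary: the signature change replaces the optional *Sema with the existing AbiAlignmentAdvancedStrat union and adds a lazy path that reports error.NeedLazy instead of asserting that fields are resolved. A self-contained miniature of that pattern; all names here are invented for illustration.]

    const std = @import("std");

    const Strat = union(enum) {
        eager,
        lazy,
        sema: *u32, // stand-in for *Sema
    };

    fn hasBits(fields_resolved: bool, strat: Strat) error{NeedLazy}!bool {
        switch (strat) {
            // With a Sema available, fields could be resolved on demand.
            .sema => |counter| counter.* += 1,
            // Eager callers promise resolution already happened.
            .eager => std.debug.assert(fields_resolved),
            // Lazy callers defer the question instead of asserting.
            .lazy => if (!fields_resolved) return error.NeedLazy,
        }
        return true;
    }

    test "strategy selection" {
        var resolve_calls: u32 = 0;
        try std.testing.expect(try hasBits(false, .{ .sema = &resolve_calls }));
        try std.testing.expectError(error.NeedLazy, hasBits(false, .lazy));
        try std.testing.expect(try hasBits(true, .eager));
    }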
diff --git a/src/value.zig b/src/value.zig
index 042a960b25..59cf9046f4 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1911,7 +1911,11 @@ pub const Value = extern union {
.lazy_align => {
const ty = lhs.castTag(.lazy_align).?.data;
- if (try ty.hasRuntimeBitsAdvanced(false, opt_sema)) {
+ const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
+ if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => unreachable,
+ else => |e| return e,
+ }) {
return .gt;
} else {
return .eq;
@@ -1919,7 +1923,11 @@ pub const Value = extern union {
},
.lazy_size => {
const ty = lhs.castTag(.lazy_size).?.data;
- if (try ty.hasRuntimeBitsAdvanced(false, opt_sema)) {
+ const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
+ if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ error.NeedLazy => unreachable,
+ else => |e| return e,
+ }) {
return .gt;
} else {
return .eq;
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index 7134df6a9c..c38e311f67 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -444,6 +444,15 @@ LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
return wrap(call_inst);
}
+void ZigLLVMAddAttributeAtIndex(LLVMValueRef Val, unsigned Idx, LLVMAttributeRef A) {
+ if (isa<Function>(unwrap(Val))) {
+ unwrap<Function>(Val)->addAttributeAtIndex(Idx, unwrap(A));
+ } else {
+ unwrap<CallInst>(Val)->addAttributeAtIndex(Idx, unwrap(A));
+ }
+}
+
+
LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile)
{
@@ -1065,12 +1074,21 @@ void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state) {
}
}
-void ZigLLVMAddByValAttr(LLVMValueRef fn_ref, unsigned ArgNo, LLVMTypeRef type_val) {
- Function *func = unwrap<Function>(fn_ref);
- AttrBuilder attr_builder(func->getContext());
- Type *llvm_type = unwrap<Type>(type_val);
- attr_builder.addByValAttr(llvm_type);
- func->addParamAttrs(ArgNo, attr_builder);
+void ZigLLVMAddByValAttr(LLVMValueRef Val, unsigned ArgNo, LLVMTypeRef type_val) {
+ if (isa<Function>(unwrap(Val))) {
+ Function *func = unwrap<Function>(Val);
+ AttrBuilder attr_builder(func->getContext());
+ Type *llvm_type = unwrap<Type>(type_val);
+ attr_builder.addByValAttr(llvm_type);
+ func->addParamAttrs(ArgNo, attr_builder);
+ } else {
+ CallInst *call = unwrap<CallInst>(Val);
+ AttrBuilder attr_builder(call->getContext());
+ Type *llvm_type = unwrap<Type>(type_val);
+ attr_builder.addByValAttr(llvm_type);
+ // NOTE: +1 here since index 0 refers to the return value
+ call->addAttributeAtIndex(ArgNo + 1, attr_builder.getAttribute(Attribute::ByVal));
+ }
}
void ZigLLVMAddSretAttr(LLVMValueRef fn_ref, LLVMTypeRef type_val) {
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 0e210f9545..2829801a46 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -129,6 +129,8 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMTypeRef functio
LLVMValueRef Fn, LLVMValueRef *Args, unsigned NumArgs, enum ZigLLVM_CallingConv CC,
enum ZigLLVM_CallAttr attr, const char *Name);
+ZIG_EXTERN_C void ZigLLVMAddAttributeAtIndex(LLVMValueRef Val, unsigned Idx, LLVMAttributeRef A);
+
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile);