Diffstat (limited to 'src')
-rw-r--r--  src/AstGen.zig                |   9
-rw-r--r--  src/Compilation.zig           |  68
-rw-r--r--  src/Module.zig                |  14
-rw-r--r--  src/Sema.zig                  | 420
-rw-r--r--  src/TypedValue.zig            |   2
-rw-r--r--  src/arch/aarch64/CodeGen.zig  |   6
-rw-r--r--  src/arch/arm/CodeGen.zig      |   6
-rw-r--r--  src/arch/wasm/CodeGen.zig     |   6
-rw-r--r--  src/arch/x86_64/CodeGen.zig   |   6
-rw-r--r--  src/codegen.zig               |   9
-rw-r--r--  src/codegen/c.zig             |  81
-rw-r--r--  src/codegen/llvm.zig          |  40
-rw-r--r--  src/libcxx.zig                |   2
-rw-r--r--  src/link.zig                  |   1
-rw-r--r--  src/link/Wasm.zig             | 135
-rw-r--r--  src/link/Wasm/types.zig       |  44
-rw-r--r--  src/target.zig                |   3
-rw-r--r--  src/translate_c.zig           | 191
-rw-r--r--  src/translate_c/ast.zig       |  24
-rw-r--r--  src/type.zig                  | 549
-rw-r--r--  src/value.zig                 | 509
21 files changed, 1376 insertions, 749 deletions
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 07a972eaab..48e6a480f3 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -9709,7 +9709,7 @@ fn rvalue(
const result_index = refToIndex(result) orelse
return gz.addUnTok(.ref, result, src_token);
const zir_tags = gz.astgen.instructions.items(.tag);
- if (zir_tags[result_index].isParam())
+ if (zir_tags[result_index].isParam() or astgen.isInferred(result))
return gz.addUnTok(.ref, result, src_token);
const gop = try astgen.ref_table.getOrPut(astgen.gpa, result_index);
if (!gop.found_existing) {
@@ -12196,6 +12196,13 @@ fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool {
.alloc_inferred_comptime_mut,
=> true,
+ .extended => {
+ const zir_data = astgen.instructions.items(.data);
+ if (zir_data[inst].extended.opcode != .alloc) return false;
+ const small = @bitCast(Zir.Inst.AllocExtended.Small, zir_data[inst].extended.small);
+ return !small.has_type;
+ },
+
else => false,
};
}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 99332e5d2b..81aa6ec596 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -7,6 +7,9 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.compilation);
const Target = std.Target;
+const ArrayList = std.ArrayList;
+const Sha256 = std.crypto.hash.sha2.Sha256;
+const fs = std.fs;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
@@ -1106,6 +1109,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const root_name = try arena.dupeZ(u8, options.root_name);
const use_stage1 = options.use_stage1 orelse false;
+ if (use_stage1 and !build_options.have_stage1) return error.ZigCompilerBuiltWithoutStage1;
// Make a decision on whether to use LLVM or our own backend.
const use_llvm = build_options.have_llvm and blk: {
@@ -3918,6 +3922,70 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
}
}
+ // Windows has an argument length limit of 32,766 characters, macOS 262,144 and Linux
+ // 2,097,152. If our args exceed 30 KiB, we instead write them to a "response file" and
+ // pass that to zig, e.g. via 'zig build-lib @args.rsp'
+ // See @file syntax here: https://gcc.gnu.org/onlinedocs/gcc/Overall-Options.html
+ var args_length: usize = 0;
+ for (argv.items) |arg| {
+ args_length += arg.len + 1; // +1 to account for null terminator
+ }
+ if (args_length >= 30 * 1024) {
+ const allocator = comp.gpa;
+ const input_args = argv.items[2..];
+ const output_dir = comp.local_cache_directory;
+
+ var args_arena = std.heap.ArenaAllocator.init(allocator);
+ defer args_arena.deinit();
+
+ const args_to_escape = input_args;
+ var escaped_args = try ArrayList([]const u8).initCapacity(args_arena.allocator(), args_to_escape.len);
+
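+ // Escape backslashes and double quotes so each argument survives being
+ // wrapped in quotes below; e.g. a hypothetical arg `C:\x "y"` becomes `C:\\x \"y\"`.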
+ arg_blk: for (args_to_escape) |arg| {
+ for (arg) |c, arg_idx| {
+ if (c == '\\' or c == '"') {
+ // Slow path for arguments that need to be escaped. We'll need to allocate and copy.
+ var escaped = try ArrayList(u8).initCapacity(args_arena.allocator(), arg.len + 1);
+ const writer = escaped.writer();
+ writer.writeAll(arg[0..arg_idx]) catch unreachable;
+ for (arg[arg_idx..]) |to_escape| {
+ if (to_escape == '\\' or to_escape == '"') try writer.writeByte('\\');
+ try writer.writeByte(to_escape);
+ }
+ escaped_args.appendAssumeCapacity(escaped.items);
+ continue :arg_blk;
+ }
+ }
+ escaped_args.appendAssumeCapacity(arg); // no escaping needed so just use original argument
+ }
+
+ const partially_quoted = try std.mem.join(allocator, "\" \"", escaped_args.items);
+ const args = try std.mem.concat(allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" });
+
+ // Write the args to zig-cache/args/<SHA256 hash of args> to avoid conflicts with
+ // other zig build commands running in parallel.
+
+ var args_hash: [Sha256.digest_length]u8 = undefined;
+ Sha256.hash(args, &args_hash, .{});
+ var args_hex_hash: [Sha256.digest_length * 2]u8 = undefined;
+ _ = try std.fmt.bufPrint(
+ &args_hex_hash,
+ "{s}",
+ .{std.fmt.fmtSliceHexLower(&args_hash)},
+ );
+
+ const args_dir = "args";
+ try output_dir.handle.makePath(args_dir);
+ const args_file = try fs.path.join(allocator, &[_][]const u8{
+ args_dir, args_hex_hash[0..],
+ });
+ try output_dir.handle.writeFile(args_file, args);
+ const args_file_path = try output_dir.handle.realpathAlloc(allocator, args_file);
+
+ argv.shrinkRetainingCapacity(2);
+ try argv.append(try std.mem.concat(allocator, u8, &[_][]const u8{ "@", args_file_path }));
+ }
+
if (comp.verbose_cc) {
dump_argv(argv.items);
}
diff --git a/src/Module.zig b/src/Module.zig
index 4f150b0148..a8ea63ffc9 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3367,6 +3367,8 @@ pub fn deinit(mod: *Module) void {
for (mod.import_table.keys()) |key| {
gpa.free(key);
}
+ var failed_decls = mod.failed_decls;
+ mod.failed_decls = .{};
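+ // Detach failed_decls before destroying the imports below, since
+ // destroyDecl panics if an error is still attached to a declaration.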
for (mod.import_table.values()) |value| {
value.destroy(mod);
}
@@ -3406,10 +3408,10 @@ pub fn deinit(mod: *Module) void {
mod.local_zir_cache.handle.close();
mod.global_zir_cache.handle.close();
- for (mod.failed_decls.values()) |value| {
+ for (failed_decls.values()) |value| {
value.destroy(gpa);
}
- mod.failed_decls.deinit(gpa);
+ failed_decls.deinit(gpa);
if (mod.emit_h) |emit_h| {
for (emit_h.failed_decls.values()) |value| {
@@ -3482,6 +3484,14 @@ pub fn deinit(mod: *Module) void {
pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
const gpa = mod.gpa;
{
+ if (mod.failed_decls.contains(decl_index)) {
+ blk: {
+ const errs = mod.comp.getAllErrorsAlloc() catch break :blk;
+ for (errs.list) |err| Compilation.AllErrors.Message.renderToStdErr(err, .no_color);
+ }
+ // TODO restore test case triggering this panic
+ @panic("Zig compiler bug: attempted to destroy declaration with an attached error");
+ }
const decl = mod.declPtr(decl_index);
log.debug("destroy {*} ({s})", .{ decl, decl.name });
_ = mod.test_functions.swapRemove(decl_index);
diff --git a/src/Sema.zig b/src/Sema.zig
index 4c2f72034e..a73c1eedcb 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -151,6 +151,8 @@ pub const Block = struct {
runtime_index: Value.RuntimeIndex = .zero,
inline_block: Zir.Inst.Index = 0,
+ comptime_reason: ?*const ComptimeReason = null,
+ // TODO is_comptime and comptime_reason should probably be merged together.
is_comptime: bool,
is_typeof: bool = false,
is_coerce_result_ptr: bool = false,
@@ -173,6 +175,49 @@ pub const Block = struct {
/// Value for switch_capture in an inline case
inline_case_capture: Air.Inst.Ref = .none,
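+ /// Stores why a block is being evaluated at comptime so that, on failure,
+ /// the error can explain where the comptime requirement came from.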
+ const ComptimeReason = union(enum) {
+ c_import: struct {
+ block: *Block,
+ src: LazySrcLoc,
+ },
+ comptime_ret_ty: struct {
+ block: *Block,
+ func: Air.Inst.Ref,
+ func_src: LazySrcLoc,
+ return_ty: Type,
+ },
+
+ fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
+ const parent = msg orelse return;
+ const prefix = "expression is evaluated at comptime because ";
+ switch (cr) {
+ .c_import => |ci| {
+ try sema.errNote(ci.block, ci.src, parent, prefix ++ "it is inside a @cImport", .{});
+ },
+ .comptime_ret_ty => |rt| {
+ const src_loc = if (try sema.funcDeclSrc(rt.block, rt.func_src, rt.func)) |capture| blk: {
+ var src_loc = capture;
+ src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
+ break :blk src_loc;
+ } else blk: {
+ const src_decl = sema.mod.declPtr(rt.block.src_decl);
+ break :blk rt.func_src.toSrcLoc(src_decl);
+ };
+ if (rt.return_ty.tag() == .generic_poison) {
+ return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{});
+ }
+ try sema.mod.errNoteNonLazy(
+ src_loc,
+ parent,
+ prefix ++ "the function returns a comptime-only type '{}'",
+ .{rt.return_ty.fmt(sema.mod)},
+ );
+ try sema.explainWhyTypeIsComptime(rt.block, rt.func_src, parent, src_loc, rt.return_ty);
+ },
+ }
+ }
+ };
+
const Param = struct {
/// `noreturn` means `anytype`.
ty: Type,
@@ -224,6 +269,7 @@ pub const Block = struct {
.label = null,
.inlining = parent.inlining,
.is_comptime = parent.is_comptime,
+ .comptime_reason = parent.comptime_reason,
.is_typeof = parent.is_typeof,
.runtime_cond = parent.runtime_cond,
.runtime_loop = parent.runtime_loop,
@@ -1420,7 +1466,10 @@ fn analyzeBodyInner(
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known");
+ const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| {
+ if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
+ return err;
+ };
const inline_body = if (cond.val.toBool()) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
@@ -1438,7 +1487,10 @@ fn analyzeBodyInner(
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known");
+ const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| {
+ if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
+ return err;
+ };
const inline_body = if (cond.val.toBool()) then_body else else_body;
const old_runtime_index = block.runtime_index;
defer block.runtime_index = old_runtime_index;
@@ -1460,7 +1512,10 @@ fn analyzeBodyInner(
const err_union = try sema.resolveInst(extra.data.operand);
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
- const is_non_err_tv = try sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known");
+ const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
+ if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
+ return err;
+ };
if (is_non_err_tv.val.toBool()) {
const err_union_ty = sema.typeOf(err_union);
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
@@ -1516,7 +1571,10 @@ fn analyzeBodyInner(
const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
- const is_non_err_tv = try sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known");
+ const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
+ if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
+ return err;
+ };
if (is_non_err_tv.val.toBool()) {
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}
@@ -1675,8 +1733,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
return;
}
+ assert(!block.is_comptime);
var err_trace_block = block.makeSubBlock();
- err_trace_block.is_comptime = false;
defer err_trace_block.instructions.deinit(sema.gpa);
const src: LazySrcLoc = .unneeded;
@@ -1828,6 +1886,22 @@ fn resolveMaybeUndefValAllowVariables(
src: LazySrcLoc,
inst: Air.Inst.Ref,
) CompileError!?Value {
+ var make_runtime = false;
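+ // callers of this wrapper cannot handle runtime values, so report them as not comptime-known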
+ if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, src, inst, &make_runtime)) |val| {
+ if (make_runtime) return null;
+ return val;
+ }
+ return null;
+}
+
+/// Returns all Value tags including `variable`, `undef` and `runtime_value`.
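+/// Sets `make_runtime` to true when a `runtime_value` is encountered.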
+fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ inst: Air.Inst.Ref,
+ make_runtime: *bool,
+) CompileError!?Value {
// First section of indexes correspond to a set number of constant values.
var i: usize = @enumToInt(inst);
if (i < Air.Inst.Ref.typed_value_map.len) {
@@ -1843,7 +1917,7 @@ fn resolveMaybeUndefValAllowVariables(
.constant => {
const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
const val = sema.air_values.items[ty_pl.payload];
- if (val.tag() == .runtime_int) return null;
+ if (val.tag() == .runtime_value) make_runtime.* = true;
return val;
},
.const_ty => {
@@ -3896,6 +3970,7 @@ fn validateUnionInit(
var first_block_index = block.instructions.items.len;
var block_index = block.instructions.items.len - 1;
var init_val: ?Value = null;
+ var make_runtime = false;
while (block_index > 0) : (block_index -= 1) {
const store_inst = block.instructions.items[block_index];
if (store_inst == field_ptr_air_inst) break;
@@ -3920,7 +3995,7 @@ fn validateUnionInit(
} else {
first_block_index = @min(first_block_index, block_index);
}
- init_val = try sema.resolveMaybeUndefValAllowVariables(block, init_src, bin_op.rhs);
+ init_val = try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, init_src, bin_op.rhs, &make_runtime);
break;
}
@@ -3933,10 +4008,11 @@ fn validateUnionInit(
// instead a single `store` to the result ptr with a comptime union value.
block.instructions.shrinkRetainingCapacity(first_block_index);
- const union_val = try Value.Tag.@"union".create(sema.arena, .{
+ var union_val = try Value.Tag.@"union".create(sema.arena, .{
.tag = tag_val,
.val = val,
});
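+ // if any stored operand was only runtime-known, re-wrap the result so it stays runtime-known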
+ if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val);
const union_init = try sema.addConstant(union_ty, union_val);
try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
return;
@@ -4054,6 +4130,7 @@ fn validateStructInit(
var struct_is_comptime = true;
var first_block_index = block.instructions.items.len;
+ var make_runtime = false;
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
@@ -4130,7 +4207,7 @@ fn validateStructInit(
} else {
first_block_index = @min(first_block_index, block_index);
}
- if (try sema.resolveMaybeUndefValAllowVariables(block, field_src, bin_op.rhs)) |val| {
+ if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, field_src, bin_op.rhs, &make_runtime)) |val| {
field_values[i] = val;
} else {
struct_is_comptime = false;
@@ -4185,7 +4262,8 @@ fn validateStructInit(
// instead a single `store` to the struct_ptr with a comptime struct value.
block.instructions.shrinkRetainingCapacity(first_block_index);
- const struct_val = try Value.Tag.aggregate.create(sema.arena, field_values);
+ var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values);
+ if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val);
const struct_init = try sema.addConstant(struct_ty, struct_val);
try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
return;
@@ -4265,6 +4343,7 @@ fn zirValidateArrayInit(
var array_is_comptime = true;
var first_block_index = block.instructions.items.len;
+ var make_runtime = false;
// Collect the comptime element values in case the array literal ends up
// being comptime-known.
@@ -4326,7 +4405,7 @@ fn zirValidateArrayInit(
array_is_comptime = false;
continue;
}
- if (try sema.resolveMaybeUndefValAllowVariables(block, elem_src, bin_op.rhs)) |val| {
+ if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, elem_src, bin_op.rhs, &make_runtime)) |val| {
element_vals[i] = val;
} else {
array_is_comptime = false;
@@ -4352,7 +4431,7 @@ fn zirValidateArrayInit(
array_is_comptime = false;
continue;
}
- if (try sema.resolveMaybeUndefValAllowVariables(block, elem_src, bin_op.rhs)) |val| {
+ if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(block, elem_src, bin_op.rhs, &make_runtime)) |val| {
element_vals[i] = val;
} else {
array_is_comptime = false;
@@ -4383,7 +4462,8 @@ fn zirValidateArrayInit(
block.instructions.shrinkRetainingCapacity(first_block_index);
- const array_val = try Value.Tag.aggregate.create(sema.arena, element_vals);
+ var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals);
+ if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val);
const array_init = try sema.addConstant(array_ty, array_val);
try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
}
@@ -4922,6 +5002,10 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
var c_import_buf = std.ArrayList(u8).init(sema.gpa);
defer c_import_buf.deinit();
+ var comptime_reason: Block.ComptimeReason = .{ .c_import = .{
+ .block = parent_block,
+ .src = src,
+ } };
var child_block: Block = .{
.parent = parent_block,
.sema = sema,
@@ -4930,7 +5014,8 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
- .is_comptime = parent_block.is_comptime,
+ .is_comptime = true,
+ .comptime_reason = &comptime_reason,
.c_import_buf = &c_import_buf,
.runtime_cond = parent_block.runtime_cond,
.runtime_loop = parent_block.runtime_loop,
@@ -5031,6 +5116,8 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErro
.label = &label,
.inlining = parent_block.inlining,
.is_comptime = parent_block.is_comptime,
+ .comptime_reason = parent_block.comptime_reason,
+ .is_typeof = parent_block.is_typeof,
.want_safety = parent_block.want_safety,
.float_mode = parent_block.float_mode,
.runtime_cond = parent_block.runtime_cond,
@@ -5903,6 +5990,7 @@ fn zirCall(
defer block.is_comptime = parent_comptime;
if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) {
block.is_comptime = true;
+ // TODO set comptime_reason
}
const param_ty_inst = try sema.addType(param_ty);
@@ -5923,7 +6011,7 @@ fn zirCall(
const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm;
if (backend_supports_error_return_tracing and sema.mod.comp.bin_file.options.error_return_tracing and
- !block.is_comptime and (input_is_error or pop_error_return_trace))
+ !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace))
{
const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: {
break :b try sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src);
@@ -6033,37 +6121,6 @@ const GenericCallAdapter = struct {
}
};
-fn addComptimeReturnTypeNote(
- sema: *Sema,
- block: *Block,
- func: Air.Inst.Ref,
- func_src: LazySrcLoc,
- return_ty: Type,
- parent: *Module.ErrorMsg,
- requires_comptime: bool,
-) !void {
- if (!requires_comptime) return;
-
- const src_loc = if (try sema.funcDeclSrc(block, func_src, func)) |capture| blk: {
- var src_loc = capture;
- src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
- break :blk src_loc;
- } else blk: {
- const src_decl = sema.mod.declPtr(block.src_decl);
- break :blk func_src.toSrcLoc(src_decl);
- };
- if (return_ty.tag() == .generic_poison) {
- return sema.mod.errNoteNonLazy(src_loc, parent, "generic function is instantiated with a comptime-only return type", .{});
- }
- try sema.mod.errNoteNonLazy(
- src_loc,
- parent,
- "function is being called at comptime because it returns a comptime-only type '{}'",
- .{return_ty.fmt(sema.mod)},
- );
- try sema.explainWhyTypeIsComptime(block, func_src, parent, src_loc, return_ty);
-}
-
fn analyzeCall(
sema: *Sema,
block: *Block,
@@ -6154,11 +6211,21 @@ fn analyzeCall(
var is_generic_call = func_ty_info.is_generic;
var is_comptime_call = block.is_comptime or modifier == .compile_time;
- var comptime_only_ret_ty = false;
+ var comptime_reason_buf: Block.ComptimeReason = undefined;
+ var comptime_reason: ?*const Block.ComptimeReason = null;
if (!is_comptime_call) {
if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| {
is_comptime_call = ct;
- comptime_only_ret_ty = ct;
+ if (ct) {
+ // stage1 can't handle doing this directly
+ comptime_reason_buf = .{ .comptime_ret_ty = .{
+ .block = block,
+ .func = func,
+ .func_src = func_src,
+ .return_ty = func_ty_info.return_type,
+ } };
+ comptime_reason = &comptime_reason_buf;
+ }
} else |err| switch (err) {
error.GenericPoison => is_generic_call = true,
else => |e| return e,
@@ -6187,7 +6254,14 @@ fn analyzeCall(
error.ComptimeReturn => {
is_inline_call = true;
is_comptime_call = true;
- comptime_only_ret_ty = true;
+ // stage1 can't handle doing this directly
+ comptime_reason_buf = .{ .comptime_ret_ty = .{
+ .block = block,
+ .func = func,
+ .func_src = func_src,
+ .return_ty = func_ty_info.return_type,
+ } };
+ comptime_reason = &comptime_reason_buf;
},
else => |e| return e,
}
@@ -6199,9 +6273,7 @@ fn analyzeCall(
const result: Air.Inst.Ref = if (is_inline_call) res: {
const func_val = sema.resolveConstValue(block, func_src, func, "function being called at comptime must be comptime-known") catch |err| {
- if (err == error.AnalysisFail and sema.err != null) {
- try sema.addComptimeReturnTypeNote(block, func, func_src, func_ty_info.return_type, sema.err.?, comptime_only_ret_ty);
- }
+ if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err);
return err;
};
const module_fn = switch (func_val.tag()) {
@@ -6269,6 +6341,7 @@ fn analyzeCall(
.label = null,
.inlining = &inlining,
.is_comptime = is_comptime_call,
+ .comptime_reason = comptime_reason,
.error_return_trace_index = block.error_return_trace_index,
};
@@ -6321,11 +6394,6 @@ fn analyzeCall(
is_comptime_call,
&should_memoize,
memoized_call_key,
- // last 4 arguments are only used when reporting errors
- undefined,
- undefined,
- undefined,
- undefined,
) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = sema.inst_map.remove(inst);
@@ -6341,10 +6409,6 @@ fn analyzeCall(
is_comptime_call,
&should_memoize,
memoized_call_key,
- func,
- func_src,
- func_ty_info.return_type,
- comptime_only_ret_ty,
);
return error.AnalysisFail;
},
@@ -6403,7 +6467,7 @@ fn analyzeCall(
}
const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info);
- if (!is_comptime_call) {
+ if (!is_comptime_call and !block.is_typeof) {
try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin);
const zir_tags = sema.code.instructions.items(.tag);
@@ -6441,7 +6505,7 @@ fn analyzeCall(
break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
};
- if (!is_comptime_call and sema.typeOf(result).zigTypeTag() != .NoReturn) {
+ if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) {
try sema.emitDbgInline(
block,
module_fn,
@@ -6581,10 +6645,6 @@ fn analyzeInlineCallArg(
is_comptime_call: bool,
should_memoize: *bool,
memoized_call_key: Module.MemoizedCall.Key,
- func: Air.Inst.Ref,
- func_src: LazySrcLoc,
- ret_ty: Type,
- comptime_only_ret_ty: bool,
) !void {
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
@@ -6601,9 +6661,7 @@ fn analyzeInlineCallArg(
const uncasted_arg = uncasted_args[arg_i.*];
if (try sema.typeRequiresComptime(param_ty)) {
_ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| {
- if (err == error.AnalysisFail and sema.err != null) {
- try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
- }
+ if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
}
@@ -6612,9 +6670,7 @@ fn analyzeInlineCallArg(
if (is_comptime_call) {
try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
- if (err == error.AnalysisFail and sema.err != null) {
- try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
- }
+ if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
switch (arg_val.tag()) {
@@ -6635,20 +6691,14 @@ fn analyzeInlineCallArg(
.ty = param_ty,
.val = arg_val,
};
- } else if (((try sema.resolveMaybeUndefVal(arg_block, arg_src, casted_arg)) == null) or
- try sema.typeRequiresComptime(param_ty) or zir_tags[inst] == .param_comptime)
- {
+ } else if (zir_tags[inst] == .param_comptime or try sema.typeRequiresComptime(param_ty)) {
try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
- } else {
+ } else if (try sema.resolveMaybeUndefVal(arg_block, arg_src, casted_arg)) |val| {
// We have a comptime value but we need a runtime value to preserve inlining semantics.
- const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
- .pointee_type = param_ty,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- const alloc = try arg_block.addTy(.alloc, ptr_type);
- _ = try arg_block.addBinOp(.store, alloc, casted_arg);
- const loaded = try arg_block.addTyOp(.load, param_ty, alloc);
- try sema.inst_map.putNoClobber(sema.gpa, inst, loaded);
+ const wrapped = try sema.addConstant(param_ty, try Value.Tag.runtime_value.create(sema.arena, val));
+ try sema.inst_map.putNoClobber(sema.gpa, inst, wrapped);
+ } else {
+ try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
}
arg_i.* += 1;
@@ -6662,9 +6712,7 @@ fn analyzeInlineCallArg(
if (is_comptime_call) {
try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
- if (err == error.AnalysisFail and sema.err != null) {
- try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
- }
+ if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
switch (arg_val.tag()) {
@@ -6685,20 +6733,14 @@ fn analyzeInlineCallArg(
.ty = sema.typeOf(uncasted_arg),
.val = arg_val,
};
- } else if ((try sema.resolveMaybeUndefVal(arg_block, arg_src, uncasted_arg)) == null or
- try sema.typeRequiresComptime(param_ty) or zir_tags[inst] == .param_anytype_comptime)
- {
+ } else if (zir_tags[inst] == .param_anytype_comptime or try sema.typeRequiresComptime(param_ty)) {
try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
- } else {
+ } else if (try sema.resolveMaybeUndefVal(arg_block, arg_src, uncasted_arg)) |val| {
// We have a comptime value but we need a runtime value to preserve inlining semantics.
- const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
- .pointee_type = param_ty,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- const alloc = try arg_block.addTy(.alloc, ptr_type);
- _ = try arg_block.addBinOp(.store, alloc, uncasted_arg);
- const loaded = try arg_block.addTyOp(.load, param_ty, alloc);
- try sema.inst_map.putNoClobber(sema.gpa, inst, loaded);
+ const wrapped = try sema.addConstant(param_ty, try Value.Tag.runtime_value.create(sema.arena, val));
+ try sema.inst_map.putNoClobber(sema.gpa, inst, wrapped);
+ } else {
+ try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
}
arg_i.* += 1;
@@ -6737,6 +6779,8 @@ fn analyzeGenericCallArg(
try sema.queueFullTypeResolution(param_ty);
runtime_args[runtime_i.*] = casted_arg;
runtime_i.* += 1;
+ } else if (try sema.typeHasOnePossibleValue(block, arg_src, comptime_arg.ty)) |_| {
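+ // the argument's value is fully determined by its type; coercing anyway is
+ // presumably kept so that invalid arguments are still diagnosed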
+ _ = try sema.coerce(block, comptime_arg.ty, uncasted_arg, arg_src);
}
}
@@ -10210,6 +10254,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.label = &label,
.inlining = block.inlining,
.is_comptime = block.is_comptime,
+ .comptime_reason = block.comptime_reason,
+ .is_typeof = block.is_typeof,
.switch_else_err_ty = else_error_ty,
.runtime_cond = block.runtime_cond,
.runtime_loop = block.runtime_loop,
@@ -10319,7 +10365,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return sema.resolveBlockBody(block, src, &child_block, special.body, inst, merges);
}
- try sema.requireRuntimeBlock(block, src, operand_src);
+ if (child_block.is_comptime) {
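+ // the operand was not comptime-known (a comptime-known operand is handled
+ // above), so this resolution must fail with an explained error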
+ _ = sema.resolveConstValue(&child_block, operand_src, operand, "condition in comptime switch must be comptime-known") catch |err| {
+ if (err == error.AnalysisFail and child_block.comptime_reason != null) try child_block.comptime_reason.?.explain(sema, sema.err);
+ return err;
+ };
+ unreachable;
+ }
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
@typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
@@ -14826,7 +14878,7 @@ fn zirBuiltinSrc(
// fn_name: [:0]const u8,
field_values[1] = func_name_val;
// line: u32
- field_values[2] = try Value.Tag.runtime_int.create(sema.arena, extra.line + 1);
+ field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1));
// column: u32,
field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1);
@@ -16401,7 +16453,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
if (!ok) return;
// This is only relevant at runtime.
- if (block.is_comptime) return;
+ if (block.is_comptime or block.is_typeof) return;
// This is only relevant within functions.
if (sema.func == null) return;
@@ -16421,7 +16473,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
const src = sema.src; // TODO
// This is only relevant at runtime.
- if (start_block.is_comptime) return;
+ if (start_block.is_comptime or start_block.is_typeof) return;
const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm;
const ok = sema.owner_func.?.calls_or_awaits_errorable_fn and
@@ -18198,7 +18250,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
return sema.analyzeDeclVal(block, src, new_decl_index);
},
.Fn => {
- const struct_val = union_val.val.castTag(.aggregate).?.data;
+ const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// calling_convention: CallingConvention,
const cc = struct_val[0].toEnum(std.builtin.CallingConvention);
@@ -18232,12 +18284,17 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
break :alignment alignment;
}
};
+ const return_type = return_type_val.optionalValue() orelse
+ return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
+
var buf: Value.ToTypeBuffer = undefined;
const args_slice_val = args_val.castTag(.slice).?.data;
const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget()));
- var param_types = try sema.arena.alloc(Type, args_len);
- var comptime_params = try sema.arena.alloc(bool, args_len);
+
+ const param_types = try sema.arena.alloc(Type, args_len);
+ const comptime_params = try sema.arena.alloc(bool, args_len);
+
var noalias_bits: u32 = 0;
var i: usize = 0;
while (i < args_len) : (i += 1) {
@@ -18265,11 +18322,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
param_types[i] = try param_type.toType(&buf).copy(sema.arena);
+ comptime_params[i] = false;
}
- const return_type = return_type_val.optionalValue() orelse
- return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
-
var fn_info = Type.Payload.Function.Data{
.param_types = param_types,
.comptime_params = comptime_params.ptr,
@@ -21452,6 +21507,9 @@ fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src:
if (runtime_src) |some| {
try sema.errNote(block, some, msg, "operation is runtime due to this operand", .{});
}
+ if (block.comptime_reason) |some| {
+ try some.explain(sema, msg);
+ }
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -21923,6 +21981,7 @@ fn addSafetyCheck(
panic_id: PanicId,
) !void {
const gpa = sema.gpa;
+ assert(!parent_block.is_comptime);
var fail_block: Block = .{
.parent = parent_block,
@@ -21932,7 +21991,7 @@ fn addSafetyCheck(
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
- .is_comptime = parent_block.is_comptime,
+ .is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
@@ -22044,6 +22103,7 @@ fn panicUnwrapError(
unwrap_err_tag: Air.Inst.Tag,
is_non_err_tag: Air.Inst.Tag,
) !void {
+ assert(!parent_block.is_comptime);
const ok = try parent_block.addUnOp(is_non_err_tag, operand);
const gpa = sema.gpa;
@@ -22055,7 +22115,7 @@ fn panicUnwrapError(
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
- .is_comptime = parent_block.is_comptime,
+ .is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
@@ -22087,6 +22147,7 @@ fn panicIndexOutOfBounds(
len: Air.Inst.Ref,
cmp_op: Air.Inst.Tag,
) !void {
+ assert(!parent_block.is_comptime);
const ok = try parent_block.addBinOp(cmp_op, index, len);
const gpa = sema.gpa;
@@ -22098,7 +22159,7 @@ fn panicIndexOutOfBounds(
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
- .is_comptime = parent_block.is_comptime,
+ .is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
@@ -22129,6 +22190,7 @@ fn panicSentinelMismatch(
ptr: Air.Inst.Ref,
sentinel_index: Air.Inst.Ref,
) !void {
+ assert(!parent_block.is_comptime);
const expected_sentinel_val = maybe_sentinel orelse return;
const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val);
@@ -22169,7 +22231,7 @@ fn panicSentinelMismatch(
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
- .is_comptime = parent_block.is_comptime,
+ .is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
@@ -24065,20 +24127,23 @@ fn coerceExtra(
},
else => {},
},
- .Slice => {
- // pointer to tuple to slice
- if (inst_ty.isSinglePointer() and inst_ty.childType().isTuple() and dest_info.size == .Slice and
- sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
- {
- return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
+ .Slice => to_slice: {
+ if (inst_ty.zigTypeTag() == .Array) {
+ return sema.fail(
+ block,
+ inst_src,
+ "array literal requires address-of operator (&) to coerce to slice type '{}'",
+ .{dest_ty.fmt(sema.mod)},
+ );
}
+ if (!inst_ty.isSinglePointer()) break :to_slice;
+ const inst_child_ty = inst_ty.childType();
+ if (!inst_child_ty.isTuple()) break :to_slice;
+
// empty tuple to zero-length slice
// note that this allows coercing to a mutable slice.
- if (inst_ty.isSinglePointer() and
- inst_ty.childType().tag() == .empty_struct_literal and
- dest_info.size == .Slice)
- {
+ if (inst_child_ty.tupleFields().types.len == 0) {
const slice_val = try Value.Tag.slice.create(sema.arena, .{
.ptr = Value.undef,
.len = Value.zero,
@@ -24086,14 +24151,17 @@ fn coerceExtra(
return sema.addConstant(dest_ty, slice_val);
}
- if (inst_ty.zigTypeTag() == .Array) {
- return sema.fail(
- block,
- inst_src,
- "array literal requires address-of operator (&) to coerce to slice type '{}'",
- .{dest_ty.fmt(sema.mod)},
- );
+ // pointer to tuple to slice
+ if (dest_info.mutable) {
+ const err_msg = err_msg: {
+ const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(sema.mod)});
+ errdefer err_msg.deinit(sema.gpa);
+ try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
+ break :err_msg err_msg;
+ };
+ return sema.failWithOwnedErrorMsg(err_msg);
}
+ return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
},
.Many => p: {
if (!inst_ty.isSlice()) break :p;
@@ -26422,48 +26490,6 @@ fn bitCastVal(
const target = sema.mod.getTarget();
if (old_ty.eql(new_ty, sema.mod)) return val;
- // Some conversions have a bitwise definition that ignores in-memory layout,
- // such as converting between f80 and u80.
-
- if (old_ty.eql(Type.f80, sema.mod) and new_ty.isAbiInt()) {
- const float = val.toFloat(f80);
- switch (new_ty.intInfo(target).signedness) {
- .signed => {
- const int = @bitCast(i80, float);
- const limbs = try sema.arena.alloc(std.math.big.Limb, 2);
- const big_int = std.math.big.int.Mutable.init(limbs, int);
- return Value.fromBigInt(sema.arena, big_int.toConst());
- },
- .unsigned => {
- const int = @bitCast(u80, float);
- const limbs = try sema.arena.alloc(std.math.big.Limb, 2);
- const big_int = std.math.big.int.Mutable.init(limbs, int);
- return Value.fromBigInt(sema.arena, big_int.toConst());
- },
- }
- }
-
- if (new_ty.eql(Type.f80, sema.mod) and old_ty.isAbiInt()) {
- var bigint_space: Value.BigIntSpace = undefined;
- var bigint = try val.toBigIntAdvanced(&bigint_space, target, sema.kit(block, src));
- switch (old_ty.intInfo(target).signedness) {
- .signed => {
- // This conversion cannot fail because we already checked bit size before
- // calling bitCastVal.
- const int = bigint.to(i80) catch unreachable;
- const float = @bitCast(f80, int);
- return Value.Tag.float_80.create(sema.arena, float);
- },
- .unsigned => {
- // This conversion cannot fail because we already checked bit size before
- // calling bitCastVal.
- const int = bigint.to(u80) catch unreachable;
- const float = @bitCast(f80, int);
- return Value.Tag.float_80.create(sema.arena, float);
- },
- }
- }
-
// For types with well-defined memory layouts, we serialize them into a byte buffer,
// then deserialize to the new type.
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
@@ -28338,8 +28364,16 @@ fn resolvePeerTypes(
const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison();
const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison();
- if (candidate_ty.eql(chosen_ty, sema.mod))
+ // If the candidate can coerce into our chosen type, we're done.
+ // If the chosen type can coerce into the candidate, use that.
+ if ((try sema.coerceInMemoryAllowed(block, chosen_ty, candidate_ty, false, target, src, src)) == .ok) {
continue;
+ }
+ if ((try sema.coerceInMemoryAllowed(block, candidate_ty, chosen_ty, false, target, src, src)) == .ok) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
switch (candidate_ty_tag) {
.NoReturn, .Undefined => continue,
@@ -28739,17 +28773,6 @@ fn resolvePeerTypes(
else => {},
}
- // If the candidate can coerce into our chosen type, we're done.
- // If the chosen type can coerce into the candidate, use that.
- if ((try sema.coerceInMemoryAllowed(block, chosen_ty, candidate_ty, false, target, src, src)) == .ok) {
- continue;
- }
- if ((try sema.coerceInMemoryAllowed(block, candidate_ty, chosen_ty, false, target, src, src)) == .ok) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
-
// At this point, we hit a compile error. We need to recover
// the source locations.
const chosen_src = candidate_srcs.resolve(
@@ -29073,6 +29096,33 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator);
try wip_captures.finalize();
} else {
+ if (fields_bit_sum > std.math.maxInt(u16)) {
+ var sema: Sema = .{
+ .mod = mod,
+ .gpa = gpa,
+ .arena = undefined,
+ .perm_arena = decl_arena_allocator,
+ .code = zir,
+ .owner_decl = decl,
+ .owner_decl_index = decl_index,
+ .func = null,
+ .fn_ret_ty = Type.void,
+ .owner_func = null,
+ };
+ defer sema.deinit();
+
+ var block: Block = .{
+ .parent = null,
+ .sema = &sema,
+ .src_decl = decl_index,
+ .namespace = &struct_obj.namespace,
+ .wip_capture_scope = undefined,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = true,
+ };
+ return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
+ }
var buf: Type.Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = @intCast(u16, fields_bit_sum),
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index ba32e55f1e..619fb003f9 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -477,6 +477,6 @@ pub fn print(
},
.generic_poison_type => return writer.writeAll("(generic poison type)"),
.generic_poison => return writer.writeAll("(generic poison)"),
- .runtime_int => return writer.writeAll("[runtime value]"),
+ .runtime_value => return writer.writeAll("[runtime value]"),
};
}
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index eb8ca8e8f1..3bb5bbe0d3 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -5401,7 +5401,11 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
}
}
-fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
+fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
+ var typed_value = arg_tv;
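+ // a runtime_value wraps a known value that must nevertheless be treated as runtime; unwrap it for codegen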
+ if (typed_value.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 1ebc348fc2..67cf899dc3 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -6047,7 +6047,11 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
}
}
-fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
+fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
+ var typed_value = arg_tv;
+ if (typed_value.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 538fcb13c1..69d5e38f65 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2582,7 +2582,11 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
return @intCast(WantedT, result);
}
-fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
+fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
+ var val = arg_val;
+ if (val.castTag(.runtime_value)) |rt| {
+ val = rt.data;
+ }
if (val.isUndefDeep()) return func.emitUndefined(ty);
if (val.castTag(.decl_ref)) |decl_ref| {
const decl_index = decl_ref.data;
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 5f793aaeb9..965a34251c 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -6960,7 +6960,11 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
}
}
-fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
+fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
+ var typed_value = arg_tv;
+ if (typed_value.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
diff --git a/src/codegen.zig b/src/codegen.zig
index e7f927a2d6..6acea5a509 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -149,7 +149,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian
pub fn generateSymbol(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- typed_value: TypedValue,
+ arg_tv: TypedValue,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
@@ -157,6 +157,11 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();
+ var typed_value = arg_tv;
+ if (arg_tv.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
+
const target = bin_file.options.target;
const endian = target.cpu.arch.endian();
@@ -465,7 +470,7 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
const start = code.items.len;
try code.resize(start + abi_size);
- bigint.writeTwosComplement(code.items[start..][0..abi_size], info.bits, abi_size, endian);
+ bigint.writeTwosComplement(code.items[start..][0..abi_size], endian);
return Result{ .appended = {} };
}
switch (info.signedness) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 098424524f..83debd1af3 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -468,13 +468,10 @@ pub const DeclGen = struct {
//
// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type) error{ OutOfMemory, AnalysisFail }!void {
- switch (ptr_ty.ptrSize()) {
- .Slice => {},
- .Many, .C, .One => {
- try writer.writeByte('(');
- try dg.renderTypecast(writer, ptr_ty);
- try writer.writeByte(')');
- },
+ if (!ptr_ty.isSlice()) {
+ try writer.writeByte('(');
+ try dg.renderTypecast(writer, ptr_ty);
+ try writer.writeByte(')');
}
switch (ptr_val.tag()) {
.decl_ref_mut, .decl_ref, .variable => {
@@ -541,13 +538,13 @@ pub const DeclGen = struct {
.name = container_ty.unionFields().keys()[index],
.ty = container_ty.unionFields().values()[index].ty,
},
- .Pointer => switch (container_ty.ptrSize()) {
- .Slice => switch (index) {
+ .Pointer => field_info: {
+ assert(container_ty.isSlice());
+ break :field_info switch (index) {
0 => FieldInfo{ .name = "ptr", .ty = container_ty.childType() },
1 => FieldInfo{ .name = "len", .ty = Type.usize },
else => unreachable,
- },
- else => unreachable,
+ };
},
else => unreachable,
};
@@ -597,9 +594,13 @@ pub const DeclGen = struct {
dg: *DeclGen,
writer: anytype,
ty: Type,
- val: Value,
+ arg_val: Value,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
+ var val = arg_val;
+ if (val.castTag(.runtime_value)) |rt| {
+ val = rt.data;
+ }
const target = dg.module.getTarget();
if (val.isUndefDeep()) {
switch (ty.zigTypeTag()) {
@@ -622,25 +623,22 @@ pub const DeclGen = struct {
}
return writer.writeByte(')');
},
- .Pointer => switch (ty.ptrSize()) {
- .Slice => {
- if (location != .Initializer) {
- try writer.writeByte('(');
- try dg.renderTypecast(writer, ty);
- try writer.writeByte(')');
- }
-
- try writer.writeAll("{(");
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = ty.slicePtrFieldType(&buf);
- try dg.renderTypecast(writer, ptr_ty);
- return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val)});
- },
- .Many, .C, .One => {
- try writer.writeAll("((");
+ .Pointer => if (ty.isSlice()) {
+ if (location != .Initializer) {
+ try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
- },
+ try writer.writeByte(')');
+ }
+
+ try writer.writeAll("{(");
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&buf);
+ try dg.renderTypecast(writer, ptr_ty);
+ return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val)});
+ } else {
+ try writer.writeAll("((");
+ try dg.renderTypecast(writer, ty);
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -817,7 +815,15 @@ pub const DeclGen = struct {
return writer.writeByte(')');
},
.Pointer => switch (val.tag()) {
- .null_value, .zero => {
+ .null_value, .zero => if (ty.isSlice()) {
+ var slice_pl = Value.Payload.Slice{
+ .base = .{ .tag = .slice },
+ .data = .{ .ptr = val, .len = Value.undef },
+ };
+ const slice_val = Value.initPayload(&slice_pl.base);
+
+ return dg.renderValue(writer, ty, slice_val, location);
+ } else {
try writer.writeAll("((");
try dg.renderTypecast(writer, ty);
try writer.writeAll(")NULL)");
@@ -827,15 +833,15 @@ pub const DeclGen = struct {
return dg.renderDeclValue(writer, ty, val, decl);
},
.slice => {
- const slice = val.castTag(.slice).?.data;
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-
if (location != .Initializer) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
try writer.writeByte(')');
}
+ const slice = val.castTag(.slice).?.data;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+
try writer.writeByte('{');
try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, .Initializer);
try writer.writeAll(", ");
@@ -3923,6 +3929,7 @@ fn airIsNull(
const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty;
var payload_buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&payload_buf);
+ var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime())
TypedValue{ .ty = Type.bool, .val = Value.@"true" }
@@ -3931,7 +3938,11 @@ fn airIsNull(
TypedValue{ .ty = operand_ty, .val = Value.@"null" }
else if (payload_ty.zigTypeTag() == .ErrorSet)
TypedValue{ .ty = payload_ty, .val = Value.zero }
- else rhs: {
+ else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: {
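+ // a null optional slice in payload representation is encoded as a null `ptr` field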
+ try writer.writeAll(".ptr");
+ const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf);
+ break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.@"null" };
+ } else rhs: {
try writer.writeAll(".is_null");
break :rhs TypedValue{ .ty = Type.bool, .val = Value.@"true" };
};
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index ffc19cb6f6..d92a444ea4 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1027,7 +1027,9 @@ pub const Object = struct {
dg.addArgAttr(llvm_func, llvm_arg_i, "noalias");
}
}
- dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
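+ // optional pointers may be null, so they must not be marked nonnull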
+ if (param_ty.zigTypeTag() != .Optional) {
+ dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
+ }
if (!ptr_info.mutable) {
dg.addArgAttr(llvm_func, llvm_arg_i, "readonly");
}
@@ -1916,7 +1918,7 @@ pub const Object = struct {
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
- if (struct_obj.layout == .Packed) {
+ if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
const info = struct_obj.backing_int_ty.intInfo(target);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
@@ -3117,7 +3119,11 @@ pub const DeclGen = struct {
.slice => {
const param_ty = fn_info.param_types[it.zig_index - 1];
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = param_ty.slicePtrFieldType(&buf);
+ var opt_buf: Type.Payload.ElemType = undefined;
+ const ptr_ty = if (param_ty.zigTypeTag() == .Optional)
+ param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf)
+ else
+ param_ty.slicePtrFieldType(&buf);
const ptr_llvm_ty = try dg.lowerType(ptr_ty);
const len_llvm_ty = try dg.lowerType(Type.usize);
@@ -3187,7 +3193,11 @@ pub const DeclGen = struct {
return llvm_elem_ty;
}
- fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*llvm.Value {
+ fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value {
+ var tv = arg_tv;
+ if (tv.val.castTag(.runtime_value)) |rt| {
+ tv.val = rt.data;
+ }
if (tv.val.isUndef()) {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
@@ -5434,10 +5444,11 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.lowerType(Type.usize);
const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+ const operand = try self.resolveInst(ty_op.operand);
if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
- return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
+ const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
+ return self.builder.buildInsertValue(partial, len, 1, "");
}
- const operand = try self.resolveInst(ty_op.operand);
const indices: [2]*llvm.Value = .{
llvm_usize.constNull(), llvm_usize.constNull(),
};
@@ -6316,18 +6327,24 @@ pub const FuncGen = struct {
const operand_ty = self.air.typeOf(un_op);
const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = optional_ty.optionalChild(&buf);
if (optional_ty.optionalReprIsPayload()) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
else
operand;
+ if (payload_ty.isSlice()) {
+ const slice_ptr = self.builder.buildExtractValue(loaded, 0, "");
+ var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf));
+ return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), "");
+ }
return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), "");
}
comptime assert(optional_layout_version == 3);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
@@ -10351,7 +10368,8 @@ const ParamTypeIterator = struct {
.Unspecified, .Inline => {
it.zig_index += 1;
it.llvm_index += 1;
- if (ty.isSlice()) {
+ var buf: Type.Payload.ElemType = undefined;
+ if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) {
return .slice;
} else if (isByRef(ty)) {
return .byref;
@@ -10726,8 +10744,8 @@ fn backendSupportsF128(target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.tag()) {
.f16 => backendSupportsF16(target),
- .f80 => target.longDoubleIs(f80) and backendSupportsF80(target),
- .f128 => target.longDoubleIs(f128) and backendSupportsF128(target),
+ .f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target),
+ .f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target),
else => true,
};
}
diff --git a/src/libcxx.zig b/src/libcxx.zig
index b0261aaed6..850da698c5 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -320,7 +320,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
}
try cflags.append("-D_LIBCXXABI_HAS_NO_THREADS");
try cflags.append("-D_LIBCPP_HAS_NO_THREADS");
- } else {
+ } else if (target.abi.isGnu()) {
try cflags.append("-DHAVE___CXA_THREAD_ATEXIT_IMPL");
}
diff --git a/src/link.zig b/src/link.zig
index 9d4ac0d55b..39f51e90ec 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -696,6 +696,7 @@ pub const File = struct {
GlobalTypeMismatch,
InvalidCharacter,
InvalidEntryKind,
+ InvalidFeatureSet,
InvalidFormat,
InvalidIndex,
InvalidMagicByte,
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 4c3de84e01..b9f2d74bd8 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -651,6 +651,109 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
}
}
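+/// Validates the wasm features of all linked object files against the target's
+/// enabled features, or infers the feature set when none was given explicitly.
+/// Reports the features to emit through `to_emit` and `emit_features_count`,
+/// and returns error.InvalidFeatureSet when validation fails.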
+fn validateFeatures(
+ wasm: *const Wasm,
+ to_emit: *[@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
+ emit_features_count: *u32,
+) !void {
+ const cpu_features = wasm.base.options.target.cpu.features;
+ const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects.
+ const known_features_count = @typeInfo(types.Feature.Tag).Enum.fields.len;
+
+ var allowed = [_]bool{false} ** known_features_count;
+ var used = [_]u17{0} ** known_features_count;
+ var disallowed = [_]u17{0} ** known_features_count;
+ var required = [_]u17{0} ** known_features_count;
+
+ // when false, linking fails. We only check this after the loop so that all invalid features are reported at once.
+ var valid_feature_set = true;
+
+ // When the user has given an explicit list of features to enable,
+ // we extract them and insert each into the 'allowed' list.
+ if (!infer) {
+ inline for (@typeInfo(std.Target.wasm.Feature).Enum.fields) |feature_field| {
+ if (cpu_features.isEnabled(feature_field.value)) {
+ allowed[feature_field.value] = true;
+ emit_features_count.* += 1;
+ }
+ }
+ }
+
+ // extract all the used, disallowed and required features from each
+ // linked object file so we can test them.
+ for (wasm.objects.items) |object, object_index| {
+ for (object.features) |feature| {
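+ // Pack a "present" flag and the owning object's index into a u17:
+ // bit 0 marks the feature as set, bits 1..16 hold the object index,
+ // e.g. object_index 3 becomes (3 << 1) | 1 = 7.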
+ const value = @intCast(u16, object_index) << 1 | @as(u1, 1);
+ switch (feature.prefix) {
+ .used => {
+ used[@enumToInt(feature.tag)] = value;
+ },
+ .disallowed => {
+ disallowed[@enumToInt(feature.tag)] = value;
+ },
+ .required => {
+ required[@enumToInt(feature.tag)] = value;
+ used[@enumToInt(feature.tag)] = value;
+ },
+ }
+ }
+ }
+
+ // when we infer the features, we allow each feature found in the 'used' set
+ // and insert it into the 'allowed' set. When features are not inferred,
+ // we validate that a used feature is allowed.
+ for (used) |used_set, used_index| {
+ const is_enabled = @truncate(u1, used_set) != 0;
+ if (infer) {
+ allowed[used_index] = is_enabled;
+ emit_features_count.* += @boolToInt(is_enabled);
+ } else if (is_enabled and !allowed[used_index]) {
+ log.err("feature '{s}' not allowed, but used by linked object", .{(@intToEnum(types.Feature.Tag, used_index)).toString()});
+ log.err(" defined in '{s}'", .{wasm.objects.items[used_set >> 1].name});
+ valid_feature_set = false;
+ }
+ }
+
+ if (!valid_feature_set) {
+ return error.InvalidFeatureSet;
+ }
+
+ // For each linked object, validate the required and disallowed features
+ for (wasm.objects.items) |object| {
+ var object_used_features = [_]bool{false} ** known_features_count;
+ for (object.features) |feature| {
+ if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set.
+ // from here on, the feature counts as used
+ const disallowed_feature = disallowed[@enumToInt(feature.tag)];
+ if (@truncate(u1, disallowed_feature) != 0) {
+ log.err("feature '{s}' is disallowed, but used by linked object", .{feature.tag.toString()});
+ log.err(" disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name});
+ log.err(" used in '{s}'", .{object.name});
+ valid_feature_set = false;
+ }
+
+ object_used_features[@enumToInt(feature.tag)] = true;
+ }
+
+ // validate the linked object file has each required feature
+ for (required) |required_feature, feature_index| {
+ const is_required = @truncate(u1, required_feature) != 0;
+ if (is_required and !object_used_features[feature_index]) {
+ log.err("feature '{s}' is required but not used in linked object", .{(@intToEnum(types.Feature.Tag, feature_index)).toString()});
+ log.err(" required by '{s}'", .{wasm.objects.items[required_feature >> 1].name});
+ log.err(" missing in '{s}'", .{object.name});
+ valid_feature_set = false;
+ }
+ }
+ }
+
+ if (!valid_feature_set) {
+ return error.InvalidFeatureSet;
+ }
+
+ to_emit.* = allowed;
+}
+
fn checkUndefinedSymbols(wasm: *const Wasm) !void {
if (wasm.base.options.output_mode == .Obj) return;
@@ -2158,6 +2261,9 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
+ var emit_features_count: u32 = 0;
+ var enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool = undefined;
+ try wasm.validateFeatures(&enabled_features, &emit_features_count);
try wasm.resolveSymbolsInArchives();
try wasm.checkUndefinedSymbols();
@@ -2603,6 +2709,9 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
try emitProducerSection(&binary_bytes);
+ if (emit_features_count > 0) {
+ try emitFeaturesSection(&binary_bytes, &enabled_features, emit_features_count);
+ }
}
// Only when all sections were written successfully do we write the magic
@@ -2695,6 +2804,32 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
);
}
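+/// Emits the 'target_features' custom section: the section name, a
+/// LEB128 feature count, then for each enabled feature a prefix byte
+/// (always '+'/used when emitting), a LEB128 name length and the name.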
+fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []const bool, features_count: u32) !void {
+ const header_offset = try reserveCustomSectionHeader(binary_bytes);
+
+ const writer = binary_bytes.writer();
+ const target_features = "target_features";
+ try leb.writeULEB128(writer, @intCast(u32, target_features.len));
+ try writer.writeAll(target_features);
+
+ try leb.writeULEB128(writer, features_count);
+ for (enabled_features) |enabled, feature_index| {
+ if (enabled) {
+ const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) };
+ try leb.writeULEB128(writer, @enumToInt(feature.prefix));
+ const string = feature.tag.toString();
+ try leb.writeULEB128(writer, @intCast(u32, string.len));
+ try writer.writeAll(string);
+ }
+ }
+
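+ // The recorded size excludes the 6 bytes reserved for the custom
+ // section header itself.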
+ try writeCustomSectionHeader(
+ binary_bytes.items,
+ header_offset,
+ @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ );
+}
+
fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem.Allocator) !void {
const Name = struct {
index: u32,
diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig
index 2006fe1812..a46fad4e53 100644
--- a/src/link/Wasm/types.zig
+++ b/src/link/Wasm/types.zig
@@ -183,17 +183,44 @@ pub const Feature = struct {
/// Type of the feature; must be unique in the sequence of features.
tag: Tag,
+ /// Unlike `std.Target.wasm.Feature`, this also contains linker features such as shared-mem
pub const Tag = enum {
atomics,
bulk_memory,
exception_handling,
+ extended_const,
multivalue,
mutable_globals,
nontrapping_fptoint,
+ reference_types,
+ relaxed_simd,
sign_ext,
simd128,
tail_call,
shared_mem,
+
+ /// From a given CPU feature, returns its corresponding linker feature.
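+ /// Assumes both enums list their common features in the same order,
+ /// with linker-only tags such as `shared_mem` appended at the end.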
+ pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag {
+ return @intToEnum(Tag, @enumToInt(feature));
+ }
+
+ pub fn toString(tag: Tag) []const u8 {
+ return switch (tag) {
+ .atomics => "atomics",
+ .bulk_memory => "bulk-memory",
+ .exception_handling => "exception-handling",
+ .extended_const => "extended-const",
+ .multivalue => "multivalue",
+ .mutable_globals => "mutable-globals",
+ .nontrapping_fptoint => "nontrapping-fptoint",
+ .reference_types => "reference-types",
+ .relaxed_simd => "relaxed-simd",
+ .sign_ext => "sign-ext",
+ .simd128 => "simd128",
+ .tail_call => "tail-call",
+ .shared_mem => "shared-mem",
+ };
+ }
};
pub const Prefix = enum(u8) {
@@ -202,22 +229,10 @@ pub const Feature = struct {
required = '=',
};
- pub fn toString(feature: Feature) []const u8 {
- return switch (feature.tag) {
- .bulk_memory => "bulk-memory",
- .exception_handling => "exception-handling",
- .mutable_globals => "mutable-globals",
- .nontrapping_fptoint => "nontrapping-fptoint",
- .sign_ext => "sign-ext",
- .tail_call => "tail-call",
- else => @tagName(feature),
- };
- }
-
pub fn format(feature: Feature, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
_ = opt;
_ = fmt;
- try writer.print("{c} {s}", .{ feature.prefix, feature.toString() });
+ try writer.print("{c} {s}", .{ feature.prefix, feature.tag.toString() });
}
};
@@ -225,9 +240,12 @@ pub const known_features = std.ComptimeStringMap(Feature.Tag, .{
.{ "atomics", .atomics },
.{ "bulk-memory", .bulk_memory },
.{ "exception-handling", .exception_handling },
+ .{ "extended-const", .extended_const },
.{ "multivalue", .multivalue },
.{ "mutable-globals", .mutable_globals },
.{ "nontrapping-fptoint", .nontrapping_fptoint },
+ .{ "reference-types", .reference_types },
+ .{ "relaxed-simd", .relaxed_simd },
.{ "sign-ext", .sign_ext },
.{ "simd128", .simd128 },
.{ "tail-call", .tail_call },
diff --git a/src/target.zig b/src/target.zig
index 9e2d26dac6..079d115161 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -18,6 +18,7 @@ pub const available_libcs = [_]ArchOsAbi{
.{ .arch = .aarch64, .os = .windows, .abi = .gnu },
.{ .arch = .aarch64, .os = .macos, .abi = .none, .os_ver = .{ .major = 11, .minor = 0 } },
.{ .arch = .aarch64, .os = .macos, .abi = .none, .os_ver = .{ .major = 12, .minor = 0 } },
+ .{ .arch = .aarch64, .os = .macos, .abi = .none, .os_ver = .{ .major = 13, .minor = 0 } },
.{ .arch = .armeb, .os = .linux, .abi = .gnueabi },
.{ .arch = .armeb, .os = .linux, .abi = .gnueabihf },
.{ .arch = .armeb, .os = .linux, .abi = .musleabi },
@@ -70,9 +71,9 @@ pub const available_libcs = [_]ArchOsAbi{
.{ .arch = .x86_64, .os = .linux, .abi = .gnux32 },
.{ .arch = .x86_64, .os = .linux, .abi = .musl },
.{ .arch = .x86_64, .os = .windows, .abi = .gnu },
- .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 10, .minor = 0 } },
.{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 11, .minor = 0 } },
.{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 12, .minor = 0 } },
+ .{ .arch = .x86_64, .os = .macos, .abi = .none, .os_ver = .{ .major = 13, .minor = 0 } },
};
pub fn libCGenericName(target: std.Target) [:0]const u8 {
diff --git a/src/translate_c.zig b/src/translate_c.zig
index d71e5f30e2..693f274e8e 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -224,8 +224,7 @@ const Scope = struct {
}
}
- fn findBlockReturnType(inner: *Scope, c: *Context) clang.QualType {
- _ = c;
+ fn findBlockReturnType(inner: *Scope) clang.QualType {
var scope = inner;
while (true) {
switch (scope.id) {
@@ -833,7 +832,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
if (has_init) trans_init: {
if (decl_init) |expr| {
const node_or_error = if (expr.getStmtClass() == .StringLiteralClass)
- transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node)
+ transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node)
else
transExprCoercing(c, scope, expr, .used);
init_node = node_or_error catch |err| switch (err) {
@@ -1319,10 +1318,10 @@ fn transStmt(
.StringLiteralClass => return transStringLiteral(c, scope, @ptrCast(*const clang.StringLiteral, stmt), result_used),
.ParenExprClass => {
const expr = try transExpr(c, scope, @ptrCast(*const clang.ParenExpr, stmt).getSubExpr(), .used);
- return maybeSuppressResult(c, scope, result_used, expr);
+ return maybeSuppressResult(c, result_used, expr);
},
.InitListExprClass => return transInitListExpr(c, scope, @ptrCast(*const clang.InitListExpr, stmt), result_used),
- .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt), result_used),
+ .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt)),
.IfStmtClass => return transIfStmt(c, scope, @ptrCast(*const clang.IfStmt, stmt)),
.WhileStmtClass => return transWhileLoop(c, scope, @ptrCast(*const clang.WhileStmt, stmt)),
.DoStmtClass => return transDoWhileLoop(c, scope, @ptrCast(*const clang.DoStmt, stmt)),
@@ -1332,7 +1331,7 @@ fn transStmt(
.ContinueStmtClass => return Tag.@"continue".init(),
.BreakStmtClass => return Tag.@"break".init(),
.ForStmtClass => return transForLoop(c, scope, @ptrCast(*const clang.ForStmt, stmt)),
- .FloatingLiteralClass => return transFloatingLiteral(c, scope, @ptrCast(*const clang.FloatingLiteral, stmt), result_used),
+ .FloatingLiteralClass => return transFloatingLiteral(c, @ptrCast(*const clang.FloatingLiteral, stmt), result_used),
.ConditionalOperatorClass => {
return transConditionalOperator(c, scope, @ptrCast(*const clang.ConditionalOperator, stmt), result_used);
},
@@ -1356,9 +1355,9 @@ fn transStmt(
.OpaqueValueExprClass => {
const source_expr = @ptrCast(*const clang.OpaqueValueExpr, stmt).getSourceExpr().?;
const expr = try transExpr(c, scope, source_expr, .used);
- return maybeSuppressResult(c, scope, result_used, expr);
+ return maybeSuppressResult(c, result_used, expr);
},
- .OffsetOfExprClass => return transOffsetOfExpr(c, scope, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used),
+ .OffsetOfExprClass => return transOffsetOfExpr(c, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used),
.CompoundLiteralExprClass => {
const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt);
return transExpr(c, scope, compound_literal.getInitializer(), result_used);
@@ -1369,13 +1368,13 @@ fn transStmt(
},
.ConvertVectorExprClass => {
const conv_vec = @ptrCast(*const clang.ConvertVectorExpr, stmt);
- const conv_vec_node = try transConvertVectorExpr(c, scope, stmt.getBeginLoc(), conv_vec);
- return maybeSuppressResult(c, scope, result_used, conv_vec_node);
+ const conv_vec_node = try transConvertVectorExpr(c, scope, conv_vec);
+ return maybeSuppressResult(c, result_used, conv_vec_node);
},
.ShuffleVectorExprClass => {
const shuffle_vec_expr = @ptrCast(*const clang.ShuffleVectorExpr, stmt);
const shuffle_vec_node = try transShuffleVectorExpr(c, scope, shuffle_vec_expr);
- return maybeSuppressResult(c, scope, result_used, shuffle_vec_node);
+ return maybeSuppressResult(c, result_used, shuffle_vec_node);
},
.ChooseExprClass => {
const choose_expr = @ptrCast(*const clang.ChooseExpr, stmt);
@@ -1402,10 +1401,8 @@ fn transStmt(
fn transConvertVectorExpr(
c: *Context,
scope: *Scope,
- source_loc: clang.SourceLocation,
expr: *const clang.ConvertVectorExpr,
) TransError!Node {
- _ = source_loc;
const base_stmt = @ptrCast(*const clang.Stmt, expr);
var block_scope = try Scope.Block.init(c, scope, true);
@@ -1521,12 +1518,7 @@ fn transShuffleVectorExpr(
/// Translate a "simple" offsetof expression containing exactly one component,
/// when that component is of kind .Field - e.g. offsetof(mytype, myfield)
-fn transSimpleOffsetOfExpr(
- c: *Context,
- scope: *Scope,
- expr: *const clang.OffsetOfExpr,
-) TransError!Node {
- _ = scope;
+fn transSimpleOffsetOfExpr(c: *Context, expr: *const clang.OffsetOfExpr) TransError!Node {
assert(expr.getNumComponents() == 1);
const component = expr.getComponent(0);
if (component.getKind() == .Field) {
@@ -1551,13 +1543,12 @@ fn transSimpleOffsetOfExpr(
fn transOffsetOfExpr(
c: *Context,
- scope: *Scope,
expr: *const clang.OffsetOfExpr,
result_used: ResultUsed,
) TransError!Node {
if (expr.getNumComponents() == 1) {
- const offsetof_expr = try transSimpleOffsetOfExpr(c, scope, expr);
- return maybeSuppressResult(c, scope, result_used, offsetof_expr);
+ const offsetof_expr = try transSimpleOffsetOfExpr(c, expr);
+ return maybeSuppressResult(c, result_used, offsetof_expr);
}
// TODO implement OffsetOfExpr with more than 1 component
@@ -1613,7 +1604,6 @@ fn transCreatePointerArithmeticSignedOp(
return transCreateNodeInfixOp(
c,
- scope,
if (is_add) .add else .sub,
lhs_node,
bitcast_node,
@@ -1629,7 +1619,7 @@ fn transBinaryOperator(
) TransError!Node {
const op = stmt.getOpcode();
const qt = stmt.getType();
- const isPointerDiffExpr = cIsPointerDiffExpr(c, stmt);
+ const isPointerDiffExpr = cIsPointerDiffExpr(stmt);
switch (op) {
.Assign => return try transCreateNodeAssign(c, scope, result_used, stmt.getLHS(), stmt.getRHS()),
.Comma => {
@@ -1646,7 +1636,7 @@ fn transBinaryOperator(
});
try block_scope.statements.append(break_node);
const block_node = try block_scope.complete(c);
- return maybeSuppressResult(c, scope, result_used, block_node);
+ return maybeSuppressResult(c, result_used, block_node);
},
.Div => {
if (cIsSignedInteger(qt)) {
@@ -1654,7 +1644,7 @@ fn transBinaryOperator(
const lhs = try transExpr(c, scope, stmt.getLHS(), .used);
const rhs = try transExpr(c, scope, stmt.getRHS(), .used);
const div_trunc = try Tag.div_trunc.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
- return maybeSuppressResult(c, scope, result_used, div_trunc);
+ return maybeSuppressResult(c, result_used, div_trunc);
}
},
.Rem => {
@@ -1663,7 +1653,7 @@ fn transBinaryOperator(
const lhs = try transExpr(c, scope, stmt.getLHS(), .used);
const rhs = try transExpr(c, scope, stmt.getRHS(), .used);
const rem = try Tag.signed_remainder.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
- return maybeSuppressResult(c, scope, result_used, rem);
+ return maybeSuppressResult(c, result_used, rem);
}
},
.Shl => {
@@ -1764,7 +1754,7 @@ fn transBinaryOperator(
else
rhs_uncasted;
- const infixOpNode = try transCreateNodeInfixOp(c, scope, op_id, lhs, rhs, result_used);
+ const infixOpNode = try transCreateNodeInfixOp(c, op_id, lhs, rhs, result_used);
if (isPointerDiffExpr) {
// @divExact(@bitCast(<platform-ptrdiff_t>, @ptrToInt(lhs) -% @ptrToInt(rhs)), @sizeOf(<lhs target type>))
const ptrdiff_type = try transQualTypeIntWidthOf(c, qt, true);
@@ -1843,7 +1833,7 @@ fn transCStyleCastExprClass(
src_type,
sub_expr_node,
));
- return maybeSuppressResult(c, scope, result_used, cast_node);
+ return maybeSuppressResult(c, result_used, cast_node);
}
/// The alignment of a variable or field
@@ -1933,7 +1923,7 @@ fn transDeclStmtOne(
var init_node = if (decl_init) |expr|
if (expr.getStmtClass() == .StringLiteralClass)
- try transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node)
+ try transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node)
else
try transExprCoercing(c, scope, expr, .used)
else if (is_static_local)
@@ -2051,21 +2041,21 @@ fn transImplicitCastExpr(
.BitCast, .FloatingCast, .FloatingToIntegral, .IntegralToFloating, .IntegralCast, .PointerToIntegral, .IntegralToPointer => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
const casted = try transCCast(c, scope, expr.getBeginLoc(), dest_type, src_type, sub_expr_node);
- return maybeSuppressResult(c, scope, result_used, casted);
+ return maybeSuppressResult(c, result_used, casted);
},
.LValueToRValue, .NoOp, .FunctionToPointerDecay => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
- return maybeSuppressResult(c, scope, result_used, sub_expr_node);
+ return maybeSuppressResult(c, result_used, sub_expr_node);
},
.ArrayToPointerDecay => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
if (exprIsNarrowStringLiteral(sub_expr) or exprIsFlexibleArrayRef(c, sub_expr)) {
- return maybeSuppressResult(c, scope, result_used, sub_expr_node);
+ return maybeSuppressResult(c, result_used, sub_expr_node);
}
const addr = try Tag.address_of.create(c.arena, sub_expr_node);
const casted = try transCPtrCast(c, scope, expr.getBeginLoc(), dest_type, src_type, addr);
- return maybeSuppressResult(c, scope, result_used, casted);
+ return maybeSuppressResult(c, result_used, casted);
},
.NullToPointer => {
return Tag.null_literal.init();
@@ -2076,18 +2066,18 @@ fn transImplicitCastExpr(
const ptr_to_int = try Tag.ptr_to_int.create(c.arena, ptr_node);
const ne = try Tag.not_equal.create(c.arena, .{ .lhs = ptr_to_int, .rhs = Tag.zero_literal.init() });
- return maybeSuppressResult(c, scope, result_used, ne);
+ return maybeSuppressResult(c, result_used, ne);
},
.IntegralToBoolean, .FloatingToBoolean => {
const sub_expr_node = try transExpr(c, scope, sub_expr, .used);
// The expression is already boolean; return it as-is
if (isBoolRes(sub_expr_node))
- return maybeSuppressResult(c, scope, result_used, sub_expr_node);
+ return maybeSuppressResult(c, result_used, sub_expr_node);
// val != 0
const ne = try Tag.not_equal.create(c.arena, .{ .lhs = sub_expr_node, .rhs = Tag.zero_literal.init() });
- return maybeSuppressResult(c, scope, result_used, ne);
+ return maybeSuppressResult(c, result_used, ne);
},
.BuiltinFnToFnPtr => {
return transBuiltinFnExpr(c, scope, sub_expr, result_used);
@@ -2140,13 +2130,13 @@ fn transBoolExpr(
var res = try transExpr(c, scope, expr, used);
if (isBoolRes(res)) {
- return maybeSuppressResult(c, scope, used, res);
+ return maybeSuppressResult(c, used, res);
}
const ty = getExprQualType(c, expr).getTypePtr();
const node = try finishBoolExpr(c, scope, expr.getBeginLoc(), ty, res, used);
- return maybeSuppressResult(c, scope, used, node);
+ return maybeSuppressResult(c, used, node);
}
fn exprIsBooleanType(expr: *const clang.Expr) bool {
@@ -2299,7 +2289,7 @@ fn transIntegerLiteral(
if (suppress_as == .no_as) {
const int_lit_node = try transCreateNodeAPInt(c, eval_result.Val.getInt());
- return maybeSuppressResult(c, scope, result_used, int_lit_node);
+ return maybeSuppressResult(c, result_used, int_lit_node);
}
// Integer literals in C have types, and this can matter for several reasons.
@@ -2317,7 +2307,7 @@ fn transIntegerLiteral(
const ty_node = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc());
const rhs = try transCreateNodeAPInt(c, eval_result.Val.getInt());
const as = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = rhs });
- return maybeSuppressResult(c, scope, result_used, as);
+ return maybeSuppressResult(c, result_used, as);
}
fn transReturnStmt(
@@ -2329,7 +2319,7 @@ fn transReturnStmt(
return Tag.return_void.init();
var rhs = try transExprCoercing(c, scope, val_expr, .used);
- const return_qt = scope.findBlockReturnType(c);
+ const return_qt = scope.findBlockReturnType();
if (isBoolRes(rhs) and !qualTypeIsBoolean(return_qt)) {
rhs = try Tag.bool_to_int.create(c.arena, rhs);
}
@@ -2338,7 +2328,6 @@ fn transReturnStmt(
fn transNarrowStringLiteral(
c: *Context,
- scope: *Scope,
stmt: *const clang.StringLiteral,
result_used: ResultUsed,
) TransError!Node {
@@ -2347,7 +2336,7 @@ fn transNarrowStringLiteral(
const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])});
const node = try Tag.string_literal.create(c.arena, str);
- return maybeSuppressResult(c, scope, result_used, node);
+ return maybeSuppressResult(c, result_used, node);
}
fn transStringLiteral(
@@ -2358,18 +2347,18 @@ fn transStringLiteral(
) TransError!Node {
const kind = stmt.getKind();
switch (kind) {
- .Ascii, .UTF8 => return transNarrowStringLiteral(c, scope, stmt, result_used),
+ .Ascii, .UTF8 => return transNarrowStringLiteral(c, stmt, result_used),
.UTF16, .UTF32, .Wide => {
const str_type = @tagName(stmt.getKind());
const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() });
const expr_base = @ptrCast(*const clang.Expr, stmt);
const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc());
- const lit_array = try transStringLiteralInitializer(c, scope, stmt, array_type);
+ const lit_array = try transStringLiteralInitializer(c, stmt, array_type);
const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array });
try scope.appendNode(decl);
const node = try Tag.identifier.create(c.arena, name);
- return maybeSuppressResult(c, scope, result_used, node);
+ return maybeSuppressResult(c, result_used, node);
},
}
}
@@ -2384,7 +2373,6 @@ fn getArrayPayload(array_type: Node) ast.Payload.Array.ArrayTypeInfo {
/// the appropriate length, if necessary.
fn transStringLiteralInitializer(
c: *Context,
- scope: *Scope,
stmt: *const clang.StringLiteral,
array_type: Node,
) TransError!Node {
@@ -2403,7 +2391,7 @@ fn transStringLiteralInitializer(
const init_node = if (num_inits > 0) blk: {
if (is_narrow) {
// "string literal".* or string literal"[0..num_inits].*
- var str = try transNarrowStringLiteral(c, scope, stmt, .used);
+ var str = try transNarrowStringLiteral(c, stmt, .used);
if (str_length != array_size) str = try Tag.string_slice.create(c.arena, .{ .string = str, .end = num_inits });
break :blk try Tag.deref.create(c.arena, str);
} else {
@@ -2440,8 +2428,7 @@ fn transStringLiteralInitializer(
/// determine whether `stmt` is a "pointer subtraction expression" - a subtraction where
/// both operands resolve to addresses. The C standard requires that both operands
/// point to elements of the same array object, but we do not verify that here.
-fn cIsPointerDiffExpr(c: *Context, stmt: *const clang.BinaryOperator) bool {
- _ = c;
+fn cIsPointerDiffExpr(stmt: *const clang.BinaryOperator) bool {
const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS());
const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS());
return stmt.getOpcode() == .Sub and
@@ -2748,9 +2735,7 @@ fn transInitListExprVector(
scope: *Scope,
loc: clang.SourceLocation,
expr: *const clang.InitListExpr,
- ty: *const clang.Type,
) TransError!Node {
- _ = ty;
const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
const vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(qt));
@@ -2829,7 +2814,7 @@ fn transInitListExpr(
}
if (qual_type.isRecordType()) {
- return maybeSuppressResult(c, scope, used, try transInitListExprRecord(
+ return maybeSuppressResult(c, used, try transInitListExprRecord(
c,
scope,
source_loc,
@@ -2837,7 +2822,7 @@ fn transInitListExpr(
qual_type,
));
} else if (qual_type.isArrayType()) {
- return maybeSuppressResult(c, scope, used, try transInitListExprArray(
+ return maybeSuppressResult(c, used, try transInitListExprArray(
c,
scope,
source_loc,
@@ -2845,13 +2830,7 @@ fn transInitListExpr(
qual_type,
));
} else if (qual_type.isVectorType()) {
- return maybeSuppressResult(c, scope, used, try transInitListExprVector(
- c,
- scope,
- source_loc,
- expr,
- qual_type,
- ));
+ return maybeSuppressResult(c, used, try transInitListExprVector(c, scope, source_loc, expr));
} else {
const type_name = try c.str(qual_type.getTypeClassName());
return fail(c, error.UnsupportedType, source_loc, "unsupported initlist type: '{s}'", .{type_name});
@@ -2912,9 +2891,7 @@ fn transImplicitValueInitExpr(
c: *Context,
scope: *Scope,
expr: *const clang.Expr,
- used: ResultUsed,
) TransError!Node {
- _ = used;
const source_loc = expr.getBeginLoc();
const qt = getExprQualType(c, expr);
const ty = qt.getTypePtr();
@@ -3354,7 +3331,7 @@ fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used:
.lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()),
.rhs = try transCreateNodeAPInt(c, result.Val.getInt()),
});
- return maybeSuppressResult(c, scope, used, as_node);
+ return maybeSuppressResult(c, used, as_node);
},
else => |kind| {
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "unsupported constant expression kind '{}'", .{kind});
@@ -3391,7 +3368,7 @@ fn transCharLiteral(
try transCreateCharLitNode(c, narrow, val);
if (suppress_as == .no_as) {
- return maybeSuppressResult(c, scope, result_used, int_lit_node);
+ return maybeSuppressResult(c, result_used, int_lit_node);
}
// See comment in `transIntegerLiteral` for why this code is here.
// @as(T, x)
@@ -3400,7 +3377,7 @@ fn transCharLiteral(
.lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()),
.rhs = int_lit_node,
});
- return maybeSuppressResult(c, scope, result_used, as_node);
+ return maybeSuppressResult(c, result_used, as_node);
}
fn transStmtExpr(c: *Context, scope: *Scope, stmt: *const clang.StmtExpr, used: ResultUsed) TransError!Node {
@@ -3426,7 +3403,7 @@ fn transStmtExpr(c: *Context, scope: *Scope, stmt: *const clang.StmtExpr, used:
});
try block_scope.statements.append(break_node);
const res = try block_scope.complete(c);
- return maybeSuppressResult(c, scope, used, res);
+ return maybeSuppressResult(c, used, res);
}
fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, result_used: ResultUsed) TransError!Node {
@@ -3455,7 +3432,7 @@ fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, re
if (exprIsFlexibleArrayRef(c, @ptrCast(*const clang.Expr, stmt))) {
node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &.{} });
}
- return maybeSuppressResult(c, scope, result_used, node);
+ return maybeSuppressResult(c, result_used, node);
}
/// ptr[subscr] (`subscr` is a signed integer expression, `ptr` a pointer) becomes:
@@ -3533,7 +3510,7 @@ fn transSignedArrayAccess(
const derefed = try Tag.deref.create(c.arena, block_node);
- return maybeSuppressResult(c, &block_scope.base, result_used, derefed);
+ return maybeSuppressResult(c, result_used, derefed);
}
fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscriptExpr, result_used: ResultUsed) TransError!Node {
@@ -3574,7 +3551,7 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip
.lhs = container_node,
.rhs = rhs,
});
- return maybeSuppressResult(c, scope, result_used, node);
+ return maybeSuppressResult(c, result_used, node);
}
/// Check if an expression is ultimately a reference to a function declaration
@@ -3665,7 +3642,7 @@ fn transCallExpr(c: *Context, scope: *Scope, stmt: *const clang.CallExpr, result
}
}
- return maybeSuppressResult(c, scope, result_used, node);
+ return maybeSuppressResult(c, result_used, node);
}
const ClangFunctionType = union(enum) {
@@ -3705,14 +3682,13 @@ fn transUnaryExprOrTypeTraitExpr(
stmt: *const clang.UnaryExprOrTypeTraitExpr,
result_used: ResultUsed,
) TransError!Node {
- _ = result_used;
const loc = stmt.getBeginLoc();
const type_node = try transQualType(c, scope, stmt.getTypeOfArgument(), loc);
const kind = stmt.getKind();
- switch (kind) {
- .SizeOf => return Tag.sizeof.create(c.arena, type_node),
- .AlignOf => return Tag.alignof.create(c.arena, type_node),
+ const node = switch (kind) {
+ .SizeOf => try Tag.sizeof.create(c.arena, type_node),
+ .AlignOf => try Tag.alignof.create(c.arena, type_node),
.PreferredAlignOf,
.VecStep,
.OpenMPRequiredSimdAlign,
@@ -3723,7 +3699,8 @@ fn transUnaryExprOrTypeTraitExpr(
"unsupported type trait kind {}",
.{kind},
),
- }
+ };
+ return maybeSuppressResult(c, result_used, node);
}
fn qualTypeHasWrappingOverflow(qt: clang.QualType) bool {
@@ -3812,7 +3789,7 @@ fn transCreatePreCrement(
// zig: expr += 1
const lhs = try transExpr(c, scope, op_expr, .used);
const rhs = Tag.one_literal.init();
- return transCreateNodeInfixOp(c, scope, op, lhs, rhs, .used);
+ return transCreateNodeInfixOp(c, op, lhs, rhs, .used);
}
// worst case
// c: ++expr
@@ -3832,7 +3809,7 @@ fn transCreatePreCrement(
const lhs_node = try Tag.identifier.create(c.arena, ref);
const ref_node = try Tag.deref.create(c.arena, lhs_node);
- const node = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, Tag.one_literal.init(), .used);
+ const node = try transCreateNodeInfixOp(c, op, ref_node, Tag.one_literal.init(), .used);
try block_scope.statements.append(node);
const break_node = try Tag.break_val.create(c.arena, .{
@@ -3858,7 +3835,7 @@ fn transCreatePostCrement(
// zig: expr += 1
const lhs = try transExpr(c, scope, op_expr, .used);
const rhs = Tag.one_literal.init();
- return transCreateNodeInfixOp(c, scope, op, lhs, rhs, .used);
+ return transCreateNodeInfixOp(c, op, lhs, rhs, .used);
}
// worst case
// c: expr++
@@ -3884,7 +3861,7 @@ fn transCreatePostCrement(
const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = ref_node });
try block_scope.statements.append(tmp_decl);
- const node = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, Tag.one_literal.init(), .used);
+ const node = try transCreateNodeInfixOp(c, op, ref_node, Tag.one_literal.init(), .used);
try block_scope.statements.append(node);
const break_node = try Tag.break_val.create(c.arena, .{
@@ -3965,7 +3942,7 @@ fn transCreateCompoundAssign(
else
try Tag.div_trunc.create(c.arena, operands);
- return transCreateNodeInfixOp(c, scope, .assign, lhs_node, builtin, .used);
+ return transCreateNodeInfixOp(c, .assign, lhs_node, builtin, .used);
}
if (is_shift) {
@@ -3974,7 +3951,7 @@ fn transCreateCompoundAssign(
} else if (requires_int_cast) {
rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
}
- return transCreateNodeInfixOp(c, scope, op, lhs_node, rhs_node, .used);
+ return transCreateNodeInfixOp(c, op, lhs_node, rhs_node, .used);
}
// worst case
// c: lhs += rhs
@@ -4005,7 +3982,7 @@ fn transCreateCompoundAssign(
else
try Tag.div_trunc.create(c.arena, operands);
- const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, ref_node, builtin, .used);
+ const assign = try transCreateNodeInfixOp(c, .assign, ref_node, builtin, .used);
try block_scope.statements.append(assign);
} else {
if (is_shift) {
@@ -4015,7 +3992,7 @@ fn transCreateCompoundAssign(
rhs_node = try transCCast(c, &block_scope.base, loc, lhs_qt, rhs_qt, rhs_node);
}
- const assign = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, rhs_node, .used);
+ const assign = try transCreateNodeInfixOp(c, op, ref_node, rhs_node, .used);
try block_scope.statements.append(assign);
}
@@ -4071,7 +4048,7 @@ fn transCPtrCast(
}
}
-fn transFloatingLiteral(c: *Context, scope: *Scope, expr: *const clang.FloatingLiteral, used: ResultUsed) TransError!Node {
+fn transFloatingLiteral(c: *Context, expr: *const clang.FloatingLiteral, used: ResultUsed) TransError!Node {
switch (expr.getRawSemantics()) {
.IEEEhalf, // f16
.IEEEsingle, // f32
@@ -4095,7 +4072,7 @@ fn transFloatingLiteral(c: *Context, scope: *Scope, expr: *const clang.FloatingL
try std.fmt.allocPrint(c.arena, "{d}", .{dbl});
var node = try Tag.float_literal.create(c.arena, str);
if (is_negative) node = try Tag.negate.create(c.arena, node);
- return maybeSuppressResult(c, scope, used, node);
+ return maybeSuppressResult(c, used, node);
}
fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node {
@@ -4151,7 +4128,7 @@ fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang
});
try block_scope.statements.append(break_node);
const res = try block_scope.complete(c);
- return maybeSuppressResult(c, scope, used, res);
+ return maybeSuppressResult(c, used, res);
}
fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.ConditionalOperator, used: ResultUsed) TransError!Node {
@@ -4191,13 +4168,7 @@ fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.Condi
return if_node;
}
-fn maybeSuppressResult(
- c: *Context,
- scope: *Scope,
- used: ResultUsed,
- result: Node,
-) TransError!Node {
- _ = scope;
+fn maybeSuppressResult(c: *Context, used: ResultUsed, result: Node) TransError!Node {
if (used == .used) return result;
return Tag.discard.create(c.arena, .{ .should_skip = false, .value = result });
}
@@ -4551,7 +4522,7 @@ fn transCreateNodeAssign(
if (!exprIsBooleanType(lhs) and isBoolRes(rhs_node)) {
rhs_node = try Tag.bool_to_int.create(c.arena, rhs_node);
}
- return transCreateNodeInfixOp(c, scope, .assign, lhs_node, rhs_node, .used);
+ return transCreateNodeInfixOp(c, .assign, lhs_node, rhs_node, .used);
}
// worst case
@@ -4571,7 +4542,7 @@ fn transCreateNodeAssign(
const lhs_node = try transExpr(c, &block_scope.base, lhs, .used);
const tmp_ident = try Tag.identifier.create(c.arena, tmp);
- const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, lhs_node, tmp_ident, .used);
+ const assign = try transCreateNodeInfixOp(c, .assign, lhs_node, tmp_ident, .used);
try block_scope.statements.append(assign);
const break_node = try Tag.break_val.create(c.arena, .{
@@ -4584,7 +4555,6 @@ fn transCreateNodeAssign(
fn transCreateNodeInfixOp(
c: *Context,
- scope: *Scope,
op: Tag,
lhs: Node,
rhs: Node,
@@ -4598,7 +4568,7 @@ fn transCreateNodeInfixOp(
.rhs = rhs,
},
};
- return maybeSuppressResult(c, scope, used, Node.initPayload(&payload.base));
+ return maybeSuppressResult(c, used, Node.initPayload(&payload.base));
}
fn transCreateNodeBoolInfixOp(
@@ -4613,7 +4583,7 @@ fn transCreateNodeBoolInfixOp(
const lhs = try transBoolExpr(c, scope, stmt.getLHS(), .used);
const rhs = try transBoolExpr(c, scope, stmt.getRHS(), .used);
- return transCreateNodeInfixOp(c, scope, op, lhs, rhs, used);
+ return transCreateNodeInfixOp(c, op, lhs, rhs, used);
}
fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node {
@@ -4730,7 +4700,7 @@ fn transCreateNodeShiftOp(
const rhs = try transExprCoercing(c, scope, rhs_expr, .used);
const rhs_casted = try Tag.int_cast.create(c.arena, .{ .lhs = rhs_type, .rhs = rhs });
- return transCreateNodeInfixOp(c, scope, op, lhs, rhs_casted, used);
+ return transCreateNodeInfixOp(c, op, lhs, rhs_casted, used);
}
fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clang.SourceLocation) TypeError!Node {
@@ -5681,13 +5651,14 @@ const ParseError = Error || error{ParseError};
fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
// TODO parseCAssignExpr here
- const node = try parseCCondExpr(c, m, scope);
+ var block_scope = try Scope.Block.init(c, scope, true);
+ defer block_scope.deinit();
+
+ const node = try parseCCondExpr(c, m, &block_scope.base);
if (m.next().? != .Comma) {
m.i -= 1;
return node;
}
- var block_scope = try Scope.Block.init(c, scope, true);
- defer block_scope.deinit();
var last = node;
while (true) {
@@ -6261,7 +6232,7 @@ fn parseCMulExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
.Slash => {
const lhs = try macroBoolToInt(c, node);
const rhs = try macroBoolToInt(c, try parseCCastExpr(c, m, scope));
- node = try Tag.div.create(c.arena, .{ .lhs = lhs, .rhs = rhs });
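+ // The operand types of a macro are unknown until it is instantiated,
+ // so defer choosing the division semantics to a std helper instead
+ // of emitting a plain Zig `/`.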
+ node = try Tag.macro_arithmetic.create(c.arena, .{ .op = .div, .lhs = lhs, .rhs = rhs });
},
.Percent => {
const lhs = try macroBoolToInt(c, node);
@@ -6298,7 +6269,7 @@ fn parseCCastExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
// allow_fail is set when we are unsure whether we are parsing a type-name
fn parseCTypeName(c: *Context, m: *MacroCtx, scope: *Scope, allow_fail: bool) ParseError!?Node {
if (try parseCSpecifierQualifierList(c, m, scope, allow_fail)) |node| {
- return try parseCAbstractDeclarator(c, m, scope, node);
+ return try parseCAbstractDeclarator(c, m, node);
} else {
return null;
}
@@ -6327,7 +6298,7 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_
.Keyword_complex,
=> {
m.i -= 1;
- return try parseCNumericType(c, m, scope);
+ return try parseCNumericType(c, m);
},
.Keyword_enum, .Keyword_struct, .Keyword_union => {
// struct Foo will be declared as struct_Foo by transRecordDecl
@@ -6349,8 +6320,7 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_
}
}
-fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
- _ = scope;
+fn parseCNumericType(c: *Context, m: *MacroCtx) ParseError!Node {
const KwCounter = struct {
double: u8 = 0,
long: u8 = 0,
@@ -6451,8 +6421,7 @@ fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
return error.ParseError;
}
-fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, scope: *Scope, node: Node) ParseError!Node {
- _ = scope;
+fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, node: Node) ParseError!Node {
switch (m.next().?) {
.Asterisk => {
// last token of `node`
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 4dcdbc4250..4a64c13ce7 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -159,6 +159,9 @@ pub const Node = extern union {
/// @shuffle(type, a, b, mask)
shuffle,
+ /// @import("std").zig.c_translation.MacroArithmetic.<op>(lhs, rhs)
+ macro_arithmetic,
+
asm_simple,
negate,
@@ -370,6 +373,7 @@ pub const Node = extern union {
.field_access => Payload.FieldAccess,
.string_slice => Payload.StringSlice,
.shuffle => Payload.Shuffle,
+ .macro_arithmetic => Payload.MacroArithmetic,
};
}
@@ -713,6 +717,19 @@ pub const Payload = struct {
mask_vector: Node,
},
};
+
+ pub const MacroArithmetic = struct {
+ base: Payload,
+ data: struct {
+ op: Operator,
+ lhs: Node,
+ rhs: Node,
+ },
+
+ pub const Operator = enum {
+ div,
+ };
+ };
};
/// Converts the nodes into a Zig Ast.
@@ -1408,6 +1425,12 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
payload.mask_vector,
});
},
+ .macro_arithmetic => {
+ const payload = node.castTag(.macro_arithmetic).?.data;
+ const op = @tagName(payload.op);
+ const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "MacroArithmetic", op });
+ return renderCall(c, import_node, &.{ payload.lhs, payload.rhs });
+ },
.alignof => {
const payload = node.castTag(.alignof).?.data;
return renderBuiltinCall(c, "@alignOf", &.{payload});
@@ -2349,6 +2372,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.shuffle,
.static_local_var,
.mut_str,
+ .macro_arithmetic,
=> {
// no grouping needed
return renderNode(c, node);
diff --git a/src/type.zig b/src/type.zig
index 5c74f39290..3a072a2f5b 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2892,41 +2892,24 @@ pub const Type = extern union {
.anyframe_T,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
- .c_short => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) },
- .c_ushort => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) },
- .c_int => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) },
- .c_uint => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
- .c_long => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
- .c_ulong => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
- .c_longlong => switch (target.cpu.arch) {
- .i386 => switch (target.os.tag) {
- .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 },
- else => return AbiAlignmentAdvanced{ .scalar = 4 },
- },
- else => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
- },
- .c_ulonglong => switch (target.cpu.arch) {
- .i386 => switch (target.os.tag) {
- .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 },
- else => return AbiAlignmentAdvanced{ .scalar = 4 },
- },
- else => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
- },
+ .c_short => return AbiAlignmentAdvanced{ .scalar = CType.short.alignment(target) },
+ .c_ushort => return AbiAlignmentAdvanced{ .scalar = CType.ushort.alignment(target) },
+ .c_int => return AbiAlignmentAdvanced{ .scalar = CType.int.alignment(target) },
+ .c_uint => return AbiAlignmentAdvanced{ .scalar = CType.uint.alignment(target) },
+ .c_long => return AbiAlignmentAdvanced{ .scalar = CType.long.alignment(target) },
+ .c_ulong => return AbiAlignmentAdvanced{ .scalar = CType.ulong.alignment(target) },
+ .c_longlong => return AbiAlignmentAdvanced{ .scalar = CType.longlong.alignment(target) },
+ .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = CType.ulonglong.alignment(target) },
+ .c_longdouble => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
.f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
- .f32 => return AbiAlignmentAdvanced{ .scalar = 4 },
- .f64 => switch (target.cpu.arch) {
- .i386 => switch (target.os.tag) {
- .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 },
- else => return AbiAlignmentAdvanced{ .scalar = 4 },
- },
+ .f32 => return AbiAlignmentAdvanced{ .scalar = CType.float.alignment(target) },
+ .f64 => switch (CType.double.sizeInBits(target)) {
+ 64 => return AbiAlignmentAdvanced{ .scalar = CType.double.alignment(target) },
else => return AbiAlignmentAdvanced{ .scalar = 8 },
},
- .f128 => return AbiAlignmentAdvanced{ .scalar = 16 },
-
- .f80 => switch (target.cpu.arch) {
- .i386 => return AbiAlignmentAdvanced{ .scalar = 4 },
- .x86_64 => return AbiAlignmentAdvanced{ .scalar = 16 },
+ .f80 => switch (CType.longdouble.sizeInBits(target)) {
+ 80 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -2936,17 +2919,7 @@ pub const Type = extern union {
return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) };
},
},
- .c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
- 16 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f16, target) },
- 32 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f32, target) },
- 64 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f64, target) },
- 80 => if (target.cpu.arch == .i386 and target.isMinGW())
- return AbiAlignmentAdvanced{ .scalar = 4 }
- else
- return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f80, target) },
- 128 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f128, target) },
- else => unreachable,
- },
+ .f128 => return AbiAlignmentAdvanced{ .scalar = 16 },
// TODO revisit this when we have the concept of the error tag type
.anyerror_void_error_union,
@@ -3411,16 +3384,8 @@ pub const Type = extern union {
.f32 => return AbiSizeAdvanced{ .scalar = 4 },
.f64 => return AbiSizeAdvanced{ .scalar = 8 },
.f128 => return AbiSizeAdvanced{ .scalar = 16 },
-
- .f80 => switch (target.cpu.arch) {
- .i386 => switch (target.os.tag) {
- .windows => switch (target.abi) {
- .msvc => return AbiSizeAdvanced{ .scalar = 16 },
- else => return AbiSizeAdvanced{ .scalar = 12 },
- },
- else => return AbiSizeAdvanced{ .scalar = 12 },
- },
- .x86_64 => return AbiSizeAdvanced{ .scalar = 16 },
+ .f80 => switch (CType.longdouble.sizeInBits(target)) {
+ 80 => return AbiSizeAdvanced{ .scalar = std.mem.alignForward(10, CType.longdouble.alignment(target)) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -3469,20 +3434,8 @@ pub const Type = extern union {
if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 };
- switch (child_type.zigTypeTag()) {
- .Pointer => {
- const ptr_info = child_type.ptrInfo().data;
- const has_null = switch (ptr_info.size) {
- .Slice, .C => true,
- else => ptr_info.@"allowzero",
- };
- if (!has_null) {
- const ptr_size_bytes = @divExact(target.cpu.arch.ptrBitWidth(), 8);
- return AbiSizeAdvanced{ .scalar = ptr_size_bytes };
- }
- },
- .ErrorSet => return abiSizeAdvanced(Type.anyerror, target, strat),
- else => {},
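+ // When the optional shares the payload's representation (e.g. a
+ // non-allowzero pointer, where null marks the empty case), no
+ // separate flag byte is stored.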
+ if (ty.optionalReprIsPayload()) {
+ return abiSizeAdvanced(child_type, target, strat);
}
const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) {
@@ -3747,28 +3700,10 @@ pub const Type = extern union {
.int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data,
- .optional => {
- var buf: Payload.ElemType = undefined;
- const child_type = ty.optionalChild(&buf);
- if (!child_type.hasRuntimeBits()) return 8;
-
- if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
- return target.cpu.arch.ptrBitWidth();
-
- // Optional types are represented as a struct with the child type as the first
- // field and a boolean as the second. Since the child type's abi alignment is
- // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
- // to the child type's ABI alignment.
- const child_bit_size = try bitSizeAdvanced(child_type, target, sema_kit);
- return child_bit_size + 1;
- },
-
- .error_union => {
- const payload = ty.castTag(.error_union).?.data;
- if (!payload.payload.hasRuntimeBits()) {
- return payload.error_set.bitSizeAdvanced(target, sema_kit);
- }
- @panic("TODO bitSize error union");
+ .optional, .error_union => {
+ // Optionals and error unions are not packed, so their bit size
+ // includes padding bits.
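+ // For example, ?u32 has ABI size 8 (4 payload bytes, 1 flag byte,
+ // 3 padding bytes), so its bit size is 64 rather than 33.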
+ return (try abiSizeAdvanced(ty, target, if (sema_kit) |sk| .{ .sema_kit = sk } else .eager)).scalar * 8;
},
.atomic_order,
@@ -4045,8 +3980,8 @@ pub const Type = extern union {
.Pointer => {
const info = child_ty.ptrInfo().data;
switch (info.size) {
- .Slice, .C => return false,
- .Many, .One => return !info.@"allowzero",
+ .C => return false,
+ .Slice, .Many, .One => return !info.@"allowzero",
}
},
.ErrorSet => return true,
@@ -6668,45 +6603,80 @@ pub const CType = enum {
ulonglong,
longdouble,
+ // We don't have `c_float`/`c_double` types in Zig, but these
+ // are useful for querying target-correct alignment and checking
+ // whether C's double is f64 or f32
+ float,
+ double,
+
pub fn sizeInBits(self: CType, target: Target) u16 {
switch (target.os.tag) {
.freestanding, .other => switch (target.cpu.arch) {
.msp430 => switch (self) {
.short, .ushort, .int, .uint => return 16,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .longdouble => return 64,
+ .float, .long, .ulong => return 32,
+ .longlong, .ulonglong, .double, .longdouble => return 64,
},
.avr => switch (self) {
.short, .ushort, .int, .uint => return 16,
- .long, .ulong, .longdouble => return 32,
+ .long, .ulong, .float, .double, .longdouble => return 32,
.longlong, .ulonglong => return 64,
},
+ .tce, .tcele => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
+ .float, .double, .longdouble => return 32,
+ },
+ .mips64, .mips64el => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 128,
+ },
+ .x86_64 => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 80,
+ },
else => switch (self) {
.short, .ushort => return 16,
- .int, .uint => return 32,
+ .int, .uint, .float => return 32,
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong => return 64,
+ .longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.i386 => switch (target.abi) {
.android => return 64,
else => return 80,
},
- .x86_64 => return 80,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => return 128,
+ },
+
+ .riscv32,
.riscv64,
.aarch64,
.aarch64_be,
.aarch64_32,
.s390x,
- .mips64,
- .mips64el,
.sparc,
.sparc64,
.sparcel,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
.wasm32,
.wasm64,
=> return 128,
@@ -6730,23 +6700,78 @@ pub const CType = enum {
.fuchsia,
.minix,
=> switch (target.cpu.arch) {
+ .msp430 => switch (self) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float => return 32,
+ .longlong, .ulonglong, .double, .longdouble => return 64,
+ },
.avr => switch (self) {
.short, .ushort, .int, .uint => return 16,
- .long, .ulong, .longdouble => return 32,
+ .long, .ulong, .float, .double, .longdouble => return 32,
.longlong, .ulonglong => return 64,
},
+ .tce, .tcele => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
+ .float, .double, .longdouble => return 32,
+ },
+ .mips64, .mips64el => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
+ },
+ .x86_64 => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 80,
+ },
else => switch (self) {
.short, .ushort => return 16,
- .int, .uint => return 32,
+ .int, .uint, .float => return 32,
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong => return 64,
+ .longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.i386 => switch (target.abi) {
.android => return 64,
else => return 80,
},
- .x86_64 => return 80,
+ .powerpc,
+ .powerpcle,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => switch (target.os.tag) {
+ .freebsd, .netbsd, .openbsd => return 64,
+ else => return 128,
+ },
+ },
+
+ .powerpc64,
+ .powerpc64le,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => switch (target.os.tag) {
+ .freebsd, .openbsd => return 64,
+ else => return 128,
+ },
+ },
+
+ .riscv32,
.riscv64,
.aarch64,
.aarch64_be,
@@ -6757,10 +6782,6 @@ pub const CType = enum {
.sparc,
.sparc64,
.sparcel,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
.wasm32,
.wasm64,
=> return 128,
@@ -6770,37 +6791,77 @@ pub const CType = enum {
},
},
- .windows, .uefi => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong => return 32,
- .longlong, .ulonglong => return 64,
- .longdouble => switch (target.cpu.arch) {
- .i386 => switch (target.abi) {
- .gnu => return 80,
+ .windows, .uefi => switch (target.cpu.arch) {
+ .i386 => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 80,
else => return 64,
},
- .x86_64 => switch (target.abi) {
- .gnu => return 80,
+ },
+ .x86_64 => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .cygnus => return 64,
+ else => return 32,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 80,
else => return 64,
},
- else => return 64,
+ },
+ else => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 64,
},
},
.macos, .ios, .tvos, .watchos => switch (self) {
.short, .ushort => return 16,
- .int, .uint => return 32,
- .long, .ulong, .longlong, .ulonglong => return 64,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.cpu.arch) {
+ .i386, .arm, .aarch64_32 => return 32,
+ .x86_64 => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
- .i386, .x86_64 => return 80,
+ .i386 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+ .x86_64 => return 80,
else => return 64,
},
},
+ .nvcl, .cuda => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.cpu.arch) {
+ .nvptx => return 32,
+ .nvptx64 => return 64,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 64,
+ },
+
.amdhsa, .amdpal => switch (self) {
.short, .ushort => return 16,
- .int, .uint => return 32,
- .long, .ulong, .longlong, .ulonglong => return 64,
+ .int, .uint, .float => return 32,
+ .long, .ulong, .longlong, .ulonglong, .double => return 64,
.longdouble => return 128,
},
@@ -6811,8 +6872,6 @@ pub const CType = enum {
.rtems,
.nacl,
.aix,
- .cuda,
- .nvcl,
.ps4,
.ps5,
.elfiamcu,
@@ -6828,4 +6887,240 @@ pub const CType = enum {
=> @panic("TODO specify the C integer and float type sizes for this OS"),
}
}
+
+ pub fn alignment(self: CType, target: Target) u16 {
+
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .avr => switch (self) {
+ .short, .ushort => return 2,
+ else => return 1,
+ },
+ .i386 => switch (target.os.tag) {
+ .windows, .uefi => switch (self) {
+ .longlong, .ulonglong, .double => return 8,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => {},
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
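+ // For example, an 80-bit long double occupies (80 + 7) / 8 = 10
+ // bytes, which rounds up to 16, after which the per-arch cap applies.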
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => 8,
+
+ else => @as(u16, 4),
+ },
+ .ios, .tvos, .watchos => 4,
+ else => 8,
+ },
+
+ .msp430,
+ .avr,
+ => 2,
+
+ .arc,
+ .csky,
+ .i386,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
+
+ pub fn preferredAlignment(self: CType, target: Target) u16 {
+
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => {},
+
+ else => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ .ios, .tvos, .watchos => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ else => {},
+ },
+ .arc => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ .avr => switch (self) {
+ .int, .uint, .long, .ulong, .float, .longdouble => return 1,
+ .short, .ushort => return 2,
+ .double => return 4,
+ .longlong, .ulonglong => return 8,
+ },
+ .i386 => switch (target.os.tag) {
+ .windows, .uefi => switch (self) {
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => switch (self) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
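+ // As in `alignment`: round the size up to whole bytes, then up to the
+ // next power of two, capped by a per-architecture maximum.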
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
+ switch (target.cpu.arch) {
+ .msp430 => @as(u16, 2),
+
+ .csky,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .arc,
+ .arm,
+ .armeb,
+ .avr,
+ .thumb,
+ .thumbeb,
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .i386,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
};
diff --git a/src/value.zig b/src/value.zig
index d24c5a1c17..a727df5d22 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -111,10 +111,12 @@ pub const Value = extern union {
int_i64,
int_big_positive,
int_big_negative,
- runtime_int,
function,
extern_fn,
variable,
+ /// A wrapper for values which are comptime-known but must be treated
+ /// semantically as runtime-known.
+ runtime_value,
/// Represents a pointer to a Decl.
/// When machine codegen backend sees this, it must set the Decl's `alive` field to true.
decl_ref,
@@ -282,6 +284,7 @@ pub const Value = extern union {
.eu_payload,
.opt_payload,
.empty_array_sentinel,
+ .runtime_value,
=> Payload.SubValue,
.eu_payload_ptr,
@@ -305,7 +308,6 @@ pub const Value = extern union {
.int_type => Payload.IntType,
.int_u64 => Payload.U64,
.int_i64 => Payload.I64,
- .runtime_int => Payload.U64,
.function => Payload.Function,
.variable => Payload.Variable,
.decl_ref_mut => Payload.DeclRefMut,
@@ -485,7 +487,6 @@ pub const Value = extern union {
},
.int_type => return self.copyPayloadShallow(arena, Payload.IntType),
.int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
- .runtime_int => return self.copyPayloadShallow(arena, Payload.U64),
.int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
.int_big_positive, .int_big_negative => {
const old_payload = self.cast(Payload.BigInt).?;
@@ -567,6 +568,7 @@ pub const Value = extern union {
.eu_payload,
.opt_payload,
.empty_array_sentinel,
+ .runtime_value,
=> {
const payload = self.cast(Payload.SubValue).?;
const new_payload = try arena.create(Payload.SubValue);
@@ -765,7 +767,7 @@ pub const Value = extern union {
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
.int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
.int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
- .runtime_int => return out_stream.writeAll("[runtime value]"),
+ .runtime_value => return out_stream.writeAll("[runtime value]"),
.function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
.extern_fn => return out_stream.writeAll("(extern function)"),
.variable => return out_stream.writeAll("(variable)"),
@@ -1081,8 +1083,6 @@ pub const Value = extern union {
.int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(),
.int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(),
- .runtime_int => return BigIntMutable.init(&space.limbs, val.castTag(.runtime_int).?.data).toConst(),
-
.undef => unreachable,
.lazy_align => {
@@ -1138,8 +1138,6 @@ pub const Value = extern union {
.int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null,
.int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
- .runtime_int => return val.castTag(.runtime_int).?.data,
-
.undef => unreachable,
.lazy_align => {
@@ -1208,8 +1206,13 @@ pub const Value = extern union {
};
}
+ /// Write a Value's contents to `buffer`.
+ ///
+ /// Asserts that `buffer.len >= ty.abiSize(target)`. The buffer is allowed to
+ /// extend past the end of the value in memory; the extra bytes are left unmodified.
pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) void {
const target = mod.getTarget();
+ const endian = target.cpu.arch.endian();
if (val.isUndef()) {
const size = @intCast(usize, ty.abiSize(target));
std.mem.set(u8, buffer[0..size], 0xaa);
@@ -1220,31 +1223,41 @@ pub const Value = extern union {
.Bool => {
buffer[0] = @boolToInt(val.toBool());
},
- .Int => {
- var bigint_buffer: BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_buffer, target);
- const bits = ty.intInfo(target).bits;
- const abi_size = @intCast(usize, ty.abiSize(target));
- bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
- },
- .Enum => {
+ .Int, .Enum => {
+ const int_info = ty.intInfo(target);
+ const bits = int_info.bits;
+ const byte_count = (bits + 7) / 8;
+
var enum_buffer: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_buffer);
- var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer, target);
- const bits = ty.intInfo(target).bits;
- const abi_size = @intCast(usize, ty.abiSize(target));
- bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
+
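+ // Fast path: an integer that fits in a u64 is written out a byte at a
+ // time, avoiding a big-int round trip.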
+ if (byte_count <= @sizeOf(u64)) {
+ const int: u64 = switch (int_val.tag()) {
+ .zero => 0,
+ .one => 1,
+ .int_u64 => int_val.castTag(.int_u64).?.data,
+ .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
+ else => unreachable,
+ };
+ for (buffer[0..byte_count]) |_, i| switch (endian) {
+ .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
+ .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
+ };
+ } else {
+ var bigint_buffer: BigIntSpace = undefined;
+ const bigint = int_val.toBigInt(&bigint_buffer, target);
+ bigint.writeTwosComplement(buffer[0..byte_count], endian);
+ }
},
.Float => switch (ty.floatBits(target)) {
- 16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
- 32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer),
- 64 => return floatWriteToMemory(f64, val.toFloat(f64), target, buffer),
- 80 => return floatWriteToMemory(f80, val.toFloat(f80), target, buffer),
- 128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer),
+ 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16)), endian),
+ 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32)), endian),
+ 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64)), endian),
+ 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80)), endian),
+ 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128)), endian),
else => unreachable,
},
- .Array, .Vector => {
+ .Array => {
const len = ty.arrayLen();
const elem_ty = ty.childType();
const elem_size = @intCast(usize, elem_ty.abiSize(target));
@@ -1253,10 +1266,16 @@ pub const Value = extern union {
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
- writeToMemory(elem_val, elem_ty, mod, buffer[buf_off..]);
+ elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size;
}
},
+ .Vector => {
+ // We use byte_count instead of abi_size here so that any padding bytes
+ // follow the data bytes on both big- and little-endian systems.
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ },
.Struct => switch (ty.containerLayout()) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => {
@@ -1268,122 +1287,113 @@ pub const Value = extern union {
}
},
.Packed => {
- // TODO allocate enough heap space instead of using this buffer
- // on the stack.
- var buf: [16]std.math.big.Limb = undefined;
- const host_int = packedStructToInt(val, ty, target, &buf);
- const abi_size = @intCast(usize, ty.abiSize(target));
- const bit_size = @intCast(usize, ty.bitSize(target));
- host_int.writeTwosComplement(buffer, bit_size, abi_size, target.cpu.arch.endian());
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
.ErrorSet => {
// TODO revisit this when we have the concept of the error tag type
const Int = u16;
const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
- std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), target.cpu.arch.endian());
+ std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
},
else => @panic("TODO implement writeToMemory for more types"),
}
}
- fn packedStructToInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst {
- var bigint = BigIntMutable.init(buf, 0);
- const fields = ty.structFields().values();
- const field_vals = val.castTag(.aggregate).?.data;
- var bits: u16 = 0;
- // TODO allocate enough heap space instead of using this buffer
- // on the stack.
- var field_buf: [16]std.math.big.Limb = undefined;
- var field_space: BigIntSpace = undefined;
- var field_buf2: [16]std.math.big.Limb = undefined;
- for (fields) |field, i| {
- const field_val = field_vals[i];
- const field_bigint_const = switch (field.ty.zigTypeTag()) {
- .Void => continue,
- .Float => floatToBigInt(field_val, field.ty, target, &field_buf),
- .Int, .Bool => intOrBoolToBigInt(field_val, field.ty, target, &field_buf, &field_space),
- .Struct => switch (field.ty.containerLayout()) {
- .Auto, .Extern => unreachable, // Sema should have error'd before this.
- .Packed => packedStructToInt(field_val, field.ty, target, &field_buf),
- },
- .Vector => vectorToBigInt(field_val, field.ty, target, &field_buf),
- .Enum => enumToBigInt(field_val, field.ty, target, &field_space),
- .Union => unreachable, // TODO: packed structs support packed unions
- else => unreachable,
- };
- var field_bigint = BigIntMutable.init(&field_buf2, 0);
- field_bigint.shiftLeft(field_bigint_const, bits);
- bits += @intCast(u16, field.ty.bitSize(target));
- bigint.bitOr(bigint.toConst(), field_bigint.toConst());
- }
- return bigint.toConst();
- }
-
- fn intOrBoolToBigInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb, space: *BigIntSpace) BigIntConst {
- const big_int_const = val.toBigInt(space, target);
- if (big_int_const.positive) return big_int_const;
-
- var big_int = BigIntMutable.init(buf, 0);
- big_int.bitNotWrap(big_int_const.negate(), .unsigned, @intCast(u32, ty.bitSize(target)));
- big_int.addScalar(big_int.toConst(), 1);
- return big_int.toConst();
- }
-
- fn vectorToBigInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst {
+ /// Write a Value's contents to `buffer`.
+ ///
+ /// Both the start and the end of the provided buffer must be tight (no spare
+ /// bytes), since big-endian packed memory layouts are indexed from the end of
+ /// the buffer.
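+ /// `bit_offset` is the position within the packed layout, in bits, at which
+ /// the value starts; e.g. a bool following a u3 field in a packed struct is
+ /// written with a `bit_offset` of 3.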
+ pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) void {
+ const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- var vec_bitint = BigIntMutable.init(buf, 0);
- const vec_len = @intCast(usize, ty.arrayLen());
- const elem_ty = ty.childType();
- const elem_size = @intCast(usize, elem_ty.bitSize(target));
-
- var elem_buf: [16]std.math.big.Limb = undefined;
- var elem_space: BigIntSpace = undefined;
- var elem_buf2: [16]std.math.big.Limb = undefined;
-
- var elem_i: usize = 0;
- while (elem_i < vec_len) : (elem_i += 1) {
- const elem_i_target = if (endian == .Big) vec_len - elem_i - 1 else elem_i;
- const elem_val = val.indexVectorlike(elem_i_target);
- const elem_bigint_const = switch (elem_ty.zigTypeTag()) {
- .Int, .Bool => intOrBoolToBigInt(elem_val, elem_ty, target, &elem_buf, &elem_space),
- .Float => floatToBigInt(elem_val, elem_ty, target, &elem_buf),
- .Pointer => unreachable, // TODO
- else => unreachable, // Sema should not let this happen
- };
- var elem_bitint = BigIntMutable.init(&elem_buf2, 0);
- elem_bitint.shiftLeft(elem_bigint_const, elem_size * elem_i);
- vec_bitint.bitOr(vec_bitint.toConst(), elem_bitint.toConst());
+ if (val.isUndef()) {
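+ // Undef has no meaningful bit pattern; zero-fill the value's bits.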
+ const bit_size = @intCast(usize, ty.bitSize(target));
+ std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
+ return;
}
- return vec_bitint.toConst();
- }
+ switch (ty.zigTypeTag()) {
+ .Void => {},
+ .Bool => {
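+ // A packed bool occupies a single bit; set or clear it in place.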
+ const byte_index = switch (endian) {
+ .Little => bit_offset / 8,
+ .Big => buffer.len - bit_offset / 8 - 1,
+ };
+ if (val.toBool()) {
+ buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8));
+ } else {
+ buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8));
+ }
+ },
+ .Int, .Enum => {
+ const bits = ty.intInfo(target).bits;
+ const abi_size = @intCast(usize, ty.abiSize(target));
- fn enumToBigInt(val: Value, ty: Type, target: Target, space: *BigIntSpace) BigIntConst {
- var enum_buf: Payload.U64 = undefined;
- const int_val = val.enumToInt(ty, &enum_buf);
- return int_val.toBigInt(space, target);
- }
+ var enum_buffer: Payload.U64 = undefined;
+ const int_val = val.enumToInt(ty, &enum_buffer);
- fn floatToBigInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst {
- return switch (ty.floatBits(target)) {
- 16 => bitcastFloatToBigInt(f16, val.toFloat(f16), buf),
- 32 => bitcastFloatToBigInt(f32, val.toFloat(f32), buf),
- 64 => bitcastFloatToBigInt(f64, val.toFloat(f64), buf),
- 80 => bitcastFloatToBigInt(f80, val.toFloat(f80), buf),
- 128 => bitcastFloatToBigInt(f128, val.toFloat(f128), buf),
- else => unreachable,
- };
- }
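+ // Fast path: the integer fits in a u64 and can be written directly
+ // with writeVarPackedInt.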
+ if (abi_size <= @sizeOf(u64)) {
+ const int: u64 = switch (int_val.tag()) {
+ .zero => 0,
+ .one => 1,
+ .int_u64 => int_val.castTag(.int_u64).?.data,
+ .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
+ else => unreachable,
+ };
+ std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
+ } else {
+ var bigint_buffer: BigIntSpace = undefined;
+ const bigint = int_val.toBigInt(&bigint_buffer, target);
+ bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian);
+ }
+ },
+ .Float => switch (ty.floatBits(target)) {
+ 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16)), endian),
+ 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32)), endian),
+ 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64)), endian),
+ 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80)), endian),
+ 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128)), endian),
+ else => unreachable,
+ },
+ .Vector => {
+ const elem_ty = ty.childType();
+ const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+ const len = @intCast(usize, ty.arrayLen());
- fn bitcastFloatToBigInt(comptime F: type, f: F, buf: []std.math.big.Limb) BigIntConst {
- const Int = @Type(.{ .Int = .{
- .signedness = .unsigned,
- .bits = @typeInfo(F).Float.bits,
- } });
- const int = @bitCast(Int, f);
- return BigIntMutable.init(buf, int).toConst();
+ var bits: u16 = 0;
+ var elem_i: usize = 0;
+ var elem_value_buf: ElemValueBuffer = undefined;
+ while (elem_i < len) : (elem_i += 1) {
+ // On big-endian systems, LLVM reverses the element order of vectors by default
+ const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
+ const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
+ elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
+ bits += elem_bit_size;
+ }
+ },
+ .Struct => switch (ty.containerLayout()) {
+ .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Extern => unreachable, // Handled in non-packed writeToMemory
+ .Packed => {
+ var bits: u16 = 0;
+ const fields = ty.structFields().values();
+ const field_vals = val.castTag(.aggregate).?.data;
+ for (fields) |field, i| {
+ const field_bits = @intCast(u16, field.ty.bitSize(target));
+ field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
+ bits += field_bits;
+ }
+ },
+ },
+ else => @panic("TODO implement writeToPackedMemory for more types"),
+ }
}
+ /// Load a Value from the contents of `buffer`.
+ ///
+ /// Asserts that `buffer.len >= ty.abiSize(target)`. The buffer is allowed to
+ /// extend past the end of the value in memory; the extra bytes are ignored.
pub fn readFromMemory(
ty: Type,
mod: *Module,
@@ -1391,6 +1401,7 @@ pub const Value = extern union {
arena: Allocator,
) Allocator.Error!Value {
const target = mod.getTarget();
+ const endian = target.cpu.arch.endian();
switch (ty.zigTypeTag()) {
.Void => return Value.@"void",
.Bool => {
@@ -1400,27 +1411,40 @@ pub const Value = extern union {
return Value.@"true";
}
},
- .Int => {
- if (buffer.len == 0) return Value.zero;
+ .Int, .Enum => {
const int_info = ty.intInfo(target);
- const endian = target.cpu.arch.endian();
- const Limb = std.math.big.Limb;
- const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
- const limbs_buffer = try arena.alloc(Limb, limb_count);
- const abi_size = @intCast(usize, ty.abiSize(target));
- var bigint = BigIntMutable.init(limbs_buffer, 0);
- bigint.readTwosComplement(buffer, int_info.bits, abi_size, endian, int_info.signedness);
- return fromBigInt(arena, bigint.toConst());
+ const bits = int_info.bits;
+ const byte_count = (bits + 7) / 8;
+ if (bits == 0 or buffer.len == 0) return Value.zero;
+
+ if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
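+ // Shifting left by (64 - bits) and back right discards the unused high
+ // bits: the arithmetic shift on i64 sign-extends, the logical shift on
+ // u64 zero-extends. E.g. for bits == 8, a stored 0xFF reads back as -1.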
+ .signed => {
+ const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
+ return Value.Tag.int_i64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+ },
+ .unsigned => {
+ const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
+ return Value.Tag.int_u64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+ },
+ } else { // Slow path, we have to construct a big-int
+ const Limb = std.math.big.Limb;
+ const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+ const limbs_buffer = try arena.alloc(Limb, limb_count);
+
+ var bigint = BigIntMutable.init(limbs_buffer, 0);
+ bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
+ return fromBigInt(arena, bigint.toConst());
+ }
},
.Float => switch (ty.floatBits(target)) {
- 16 => return Value.Tag.float_16.create(arena, floatReadFromMemory(f16, target, buffer)),
- 32 => return Value.Tag.float_32.create(arena, floatReadFromMemory(f32, target, buffer)),
- 64 => return Value.Tag.float_64.create(arena, floatReadFromMemory(f64, target, buffer)),
- 80 => return Value.Tag.float_80.create(arena, floatReadFromMemory(f80, target, buffer)),
- 128 => return Value.Tag.float_128.create(arena, floatReadFromMemory(f128, target, buffer)),
+ 16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian))),
+ 32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian))),
+ 64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian))),
+ 80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian))),
+ 128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian))),
else => unreachable,
},
- .Array, .Vector => {
+ .Array => {
const elem_ty = ty.childType();
const elem_size = elem_ty.abiSize(target);
const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
@@ -1431,6 +1455,12 @@ pub const Value = extern union {
}
return Tag.aggregate.create(arena, elems);
},
+ .Vector => {
+ // We use byte_count instead of abi_size here so that any padding bytes
+ // follow the data bytes on both big- and little-endian systems.
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
+ },
.Struct => switch (ty.containerLayout()) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => {
@@ -1438,26 +1468,20 @@ pub const Value = extern union {
const field_vals = try arena.alloc(Value, fields.len);
for (fields) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
- field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..], arena);
+ const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
+ field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
}
return Tag.aggregate.create(arena, field_vals);
},
.Packed => {
- const endian = target.cpu.arch.endian();
- const Limb = std.math.big.Limb;
- const abi_size = @intCast(usize, ty.abiSize(target));
- const bit_size = @intCast(usize, ty.bitSize(target));
- const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
- const limbs_buffer = try arena.alloc(Limb, limb_count);
- var bigint = BigIntMutable.init(limbs_buffer, 0);
- bigint.readTwosComplement(buffer, bit_size, abi_size, endian, .unsigned);
- return intToPackedStruct(ty, target, bigint.toConst(), arena);
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
},
.ErrorSet => {
// TODO revisit this when we have the concept of the error tag type
const Int = u16;
- const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
+ const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian);
const payload = try arena.create(Value.Payload.Error);
payload.* = .{
@@ -1470,115 +1494,90 @@ pub const Value = extern union {
}
}
- fn intToPackedStruct(
+ /// Load a Value from the contents of `buffer`.
+ ///
+ /// Both the start and the end of the provided buffer must be tight (no spare
+ /// bytes), since big-endian packed memory layouts are indexed from the end of
+ /// the buffer.
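+ /// `bit_offset` is the position within the packed layout, in bits, at which
+ /// the value starts.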
+ pub fn readFromPackedMemory(
ty: Type,
- target: Target,
- bigint: BigIntConst,
+ mod: *Module,
+ buffer: []const u8,
+ bit_offset: usize,
arena: Allocator,
) Allocator.Error!Value {
- const limbs_buffer = try arena.alloc(std.math.big.Limb, bigint.limbs.len);
- var bigint_mut = bigint.toMutable(limbs_buffer);
- const fields = ty.structFields().values();
- const field_vals = try arena.alloc(Value, fields.len);
- var bits: u16 = 0;
- for (fields) |field, i| {
- const field_bits = @intCast(u16, field.ty.bitSize(target));
- bigint_mut.shiftRight(bigint, bits);
- bigint_mut.truncate(bigint_mut.toConst(), .unsigned, field_bits);
- bits += field_bits;
- const field_bigint = bigint_mut.toConst();
-
- field_vals[i] = switch (field.ty.zigTypeTag()) {
- .Float => switch (field.ty.floatBits(target)) {
- 16 => try bitCastBigIntToFloat(f16, .float_16, field_bigint, arena),
- 32 => try bitCastBigIntToFloat(f32, .float_32, field_bigint, arena),
- 64 => try bitCastBigIntToFloat(f64, .float_64, field_bigint, arena),
- 80 => try bitCastBigIntToFloat(f80, .float_80, field_bigint, arena),
- 128 => try bitCastBigIntToFloat(f128, .float_128, field_bigint, arena),
- else => unreachable,
- },
- .Bool => makeBool(!field_bigint.eqZero()),
- .Int => try Tag.int_big_positive.create(
- arena,
- try arena.dupe(std.math.big.Limb, field_bigint.limbs),
- ),
- .Struct => try intToPackedStruct(field.ty, target, field_bigint, arena),
- else => unreachable,
- };
- }
- return Tag.aggregate.create(arena, field_vals);
- }
-
- fn bitCastBigIntToFloat(
- comptime F: type,
- comptime float_tag: Tag,
- bigint: BigIntConst,
- arena: Allocator,
- ) !Value {
- const Int = @Type(.{ .Int = .{
- .signedness = .unsigned,
- .bits = @typeInfo(F).Float.bits,
- } });
- const int = bigint.to(Int) catch |err| switch (err) {
- error.NegativeIntoUnsigned => unreachable,
- error.TargetTooSmall => unreachable,
- };
- const f = @bitCast(F, int);
- return float_tag.create(arena, f);
- }
-
- fn floatWriteToMemory(comptime F: type, f: F, target: Target, buffer: []u8) void {
+ const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- if (F == f80) {
- const repr = std.math.break_f80(f);
- std.mem.writeInt(u64, buffer[0..8], repr.fraction, endian);
- std.mem.writeInt(u16, buffer[8..10], repr.exp, endian);
- std.mem.set(u8, buffer[10..], 0);
- return;
- }
- const Int = @Type(.{ .Int = .{
- .signedness = .unsigned,
- .bits = @typeInfo(F).Float.bits,
- } });
- const int = @bitCast(Int, f);
- std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], int, endian);
- }
+ switch (ty.zigTypeTag()) {
+ .Void => return Value.@"void",
+ .Bool => {
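+ // Big-endian packed layouts index bytes from the end of the buffer.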
+ const byte = switch (endian) {
+ .Big => buffer[buffer.len - bit_offset / 8 - 1],
+ .Little => buffer[bit_offset / 8],
+ };
+ if (((byte >> @intCast(u3, bit_offset % 8)) & 1) == 0) {
+ return Value.@"false";
+ } else {
+ return Value.@"true";
+ }
+ },
+ .Int, .Enum => {
+ if (buffer.len == 0) return Value.zero;
+ const int_info = ty.intInfo(target);
+ const abi_size = @intCast(usize, ty.abiSize(target));
- fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
- const endian = target.cpu.arch.endian();
- if (F == f80) {
- return std.math.make_f80(.{
- .fraction = readInt(u64, buffer[0..8], endian),
- .exp = readInt(u16, buffer[8..10], endian),
- });
- }
- const Int = @Type(.{ .Int = .{
- .signedness = .unsigned,
- .bits = @typeInfo(F).Float.bits,
- } });
- const int = readInt(Int, buffer[0..@sizeOf(Int)], endian);
- return @bitCast(F, int);
- }
-
- fn readInt(comptime Int: type, buffer: *const [@sizeOf(Int)]u8, endian: std.builtin.Endian) Int {
- var result: Int = 0;
- switch (endian) {
- .Big => {
- for (buffer) |byte| {
- result <<= 8;
- result |= byte;
+ const bits = int_info.bits;
+ if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
+ .signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
+ .unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
+ } else { // Slow path, we have to construct a big-int
+ const Limb = std.math.big.Limb;
+ const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+ const limbs_buffer = try arena.alloc(Limb, limb_count);
+
+ var bigint = BigIntMutable.init(limbs_buffer, 0);
+ bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
+ return fromBigInt(arena, bigint.toConst());
}
},
- .Little => {
- var i: usize = buffer.len;
- while (i != 0) {
- i -= 1;
- result <<= 8;
- result |= buffer[i];
+ .Float => switch (ty.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian))),
+ 32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian))),
+ 64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian))),
+ 80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian))),
+ 128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian))),
+ else => unreachable,
+ },
+ .Vector => {
+ const elem_ty = ty.childType();
+ const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
+
+ var bits: u16 = 0;
+ const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+ for (elems) |_, i| {
+ // On big-endian systems, LLVM reverses the element order of vectors by default
+ const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
+ elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena);
+ bits += elem_bit_size;
}
+ return Tag.aggregate.create(arena, elems);
+ },
+ .Struct => switch (ty.containerLayout()) {
+ .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Extern => unreachable, // Handled by non-packed readFromMemory
+ .Packed => {
+ var bits: u16 = 0;
+ const fields = ty.structFields().values();
+ const field_vals = try arena.alloc(Value, fields.len);
+ for (fields) |field, i| {
+ const field_bits = @intCast(u16, field.ty.bitSize(target));
+ field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
+ bits += field_bits;
+ }
+ return Tag.aggregate.create(arena, field_vals);
+ },
},
+ else => @panic("TODO implement readFromPackedMemory for more types"),
}
- return result;
}
/// Asserts that the value is a float or an integer.
@@ -2357,6 +2356,8 @@ pub const Value = extern union {
const zig_ty_tag = ty.zigTypeTag();
std.hash.autoHash(hasher, zig_ty_tag);
if (val.isUndef()) return;
+ // The value is runtime-known and shouldn't affect the hash.
+ if (val.tag() == .runtime_value) return;
switch (zig_ty_tag) {
.BoundFn => unreachable, // TODO remove this from the language
@@ -2621,6 +2622,7 @@ pub const Value = extern union {
.zero,
.one,
+ .null_value,
.int_u64,
.int_i64,
.int_big_positive,
@@ -2632,9 +2634,6 @@ pub const Value = extern union {
.lazy_size,
=> return hashInt(ptr_val, hasher, target),
- // The value is runtime-known and shouldn't affect the hash.
- .runtime_int => {},
-
else => unreachable,
}
}