author     Andrew Kelley <andrew@ziglang.org>  2022-09-11 20:26:53 -0700
committer  Andrew Kelley <andrew@ziglang.org>  2022-09-11 20:26:53 -0700
commit     ab3ac291ac08435f7aba1fc8a53fe0a0290cc1e1 (patch)
tree       5f6dd2a4a7dc97a72e3652fff09399ed1cba8b83 /src
parent     37cdb5dbf90acd61584bae4a6661d0a6f9b54295 (diff)
parent     c97d64b677eb891144fb356e1f4b9011c60cc0e2 (diff)
Merge remote-tracking branch 'origin/master' into llvm15
Diffstat (limited to 'src')
-rw-r--r--  src/Autodoc.zig                 |  102
-rw-r--r--  src/Compilation.zig             |  152
-rw-r--r--  src/arch/aarch64/CodeGen.zig    |   85
-rw-r--r--  src/arch/aarch64/Emit.zig       |    6
-rw-r--r--  src/arch/arm/CodeGen.zig        | 2587
-rw-r--r--  src/arch/arm/Emit.zig           |  100
-rw-r--r--  src/arch/arm/Mir.zig            |   23
-rw-r--r--  src/arch/x86_64/CodeGen.zig     |  208
-rw-r--r--  src/arch/x86_64/Emit.zig        |   19
-rw-r--r--  src/autodoc/render_source.zig   |    8
-rw-r--r--  src/glibc.zig                   |    9
-rw-r--r--  src/link/Coff.zig               |   79
-rw-r--r--  src/link/Coff/Atom.zig          |   10
-rw-r--r--  src/link/MachO.zig              |  341
-rw-r--r--  src/link/MachO/Atom.zig         |    6
-rw-r--r--  src/link/MachO/DebugSymbols.zig |    4
-rw-r--r--  src/link/MachO/dead_strip.zig   |    4
-rw-r--r--  src/translate_c.zig             |   20
-rw-r--r--  src/type.zig                    |    3
19 files changed, 2217 insertions(+), 1549 deletions(-)
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index e6e025b5b4..cb17c19cb3 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -280,8 +280,8 @@ pub fn generateZirData(self: *Autodoc) !void {
try std.json.stringify(
data,
.{
- .whitespace = .{ .indent = if (builtin.mode == .Debug) .{ .Space = 4 } else .None },
- .emit_null_optional_fields = false,
+ .whitespace = .{ .indent = .None, .separator = false },
+ .emit_null_optional_fields = true,
},
out,
);
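The hunk above flips autodoc's JSON output to its most compact form: no indentation, no separator space after ':' or ',', and null optional fields written out so consumers can index records positionally. A minimal sketch of the same call, assuming the 2022-era std.json API this diff targets (the value being serialized is hypothetical):

const std = @import("std");

pub fn main() !void {
    const out = std.io.getStdOut().writer();
    const data = .{ .name = "demo", .maybe = @as(?u32, null) };
    try std.json.stringify(data, .{
        .whitespace = .{ .indent = .None, .separator = false },
        .emit_null_optional_fields = true,
    }, out);
    // prints: {"name":"demo","maybe":null}
}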
@@ -404,6 +404,7 @@ const DocData = struct {
w: anytype,
) !void {
var jsw = std.json.writeStream(w, 15);
+ if (opts.whitespace) |ws| jsw.whitespace = ws;
try jsw.beginObject();
inline for (comptime std.meta.tags(std.meta.FieldEnum(DocData))) |f| {
const f_name = @tagName(f);
@@ -449,6 +450,8 @@ const DocData = struct {
w: anytype,
) !void {
var jsw = std.json.writeStream(w, 15);
+ if (opts.whitespace) |ws| jsw.whitespace = ws;
+
try jsw.beginObject();
inline for (comptime std.meta.tags(std.meta.FieldEnum(DocPackage))) |f| {
const f_name = @tagName(f);
@@ -474,6 +477,22 @@ const DocData = struct {
// The index in astNodes of the `test declname { }` node
decltest: ?usize = null,
_analyzed: bool, // omitted in json data
+
+ pub fn jsonStringify(
+ self: Decl,
+ opts: std.json.StringifyOptions,
+ w: anytype,
+ ) !void {
+ var jsw = std.json.writeStream(w, 15);
+ if (opts.whitespace) |ws| jsw.whitespace = ws;
+ try jsw.beginArray();
+ inline for (comptime std.meta.fields(Decl)) |f| {
+ try jsw.arrayElem();
+ try std.json.stringify(@field(self, f.name), opts, w);
+ jsw.state_index -= 1;
+ }
+ try jsw.endArray();
+ }
};
const AstNode = struct {
@@ -485,6 +504,22 @@ const DocData = struct {
docs: ?[]const u8 = null,
fields: ?[]usize = null, // index into astNodes
@"comptime": bool = false,
+
+ pub fn jsonStringify(
+ self: AstNode,
+ opts: std.json.StringifyOptions,
+ w: anytype,
+ ) !void {
+ var jsw = std.json.writeStream(w, 15);
+ if (opts.whitespace) |ws| jsw.whitespace = ws;
+ try jsw.beginArray();
+ inline for (comptime std.meta.fields(AstNode)) |f| {
+ try jsw.arrayElem();
+ try std.json.stringify(@field(self, f.name), opts, w);
+ jsw.state_index -= 1;
+ }
+ try jsw.endArray();
+ }
};
const Type = union(enum) {
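Decl and AstNode above gain the same hand-rolled jsonStringify: each record is emitted as a positional array rather than an object, dropping the repeated field names from every entry (field order becomes the implicit schema). A minimal sketch of the pattern on a hypothetical struct, including the state_index workaround the diff itself later flags as a TODO:

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    pub fn jsonStringify(
        self: Point,
        opts: std.json.StringifyOptions,
        w: anytype,
    ) !void {
        var jsw = std.json.writeStream(w, 15);
        if (opts.whitespace) |ws| jsw.whitespace = ws;
        try jsw.beginArray();
        inline for (comptime std.meta.fields(Point)) |f| {
            try jsw.arrayElem();
            try std.json.stringify(@field(self, f.name), opts, w);
            // std.json.stringify wrote through `w` directly, bypassing the
            // write stream, so patch its state machine back up by hand.
            jsw.state_index -= 1;
        }
        try jsw.endArray();
    }
};

pub fn main() !void {
    const out = std.io.getStdOut().writer();
    try std.json.stringify(Point{ .x = 1, .y = 2 }, .{}, out);
    // prints [1,2] instead of {"x":1,"y":2}
}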
@@ -525,7 +560,6 @@ const DocData = struct {
fields: ?[]Expr = null, // (use src->fields to find names)
line_number: usize,
outer_decl: usize,
- ast: usize,
},
ComptimeExpr: struct { name: []const u8 },
ComptimeFloat: struct { name: []const u8 },
@@ -548,7 +582,6 @@ const DocData = struct {
src: usize, // index into astNodes
privDecls: []usize = &.{}, // index into decls
pubDecls: []usize = &.{}, // index into decls
- ast: usize,
// (use src->fields to find field names)
},
Union: struct {
@@ -557,7 +590,6 @@ const DocData = struct {
privDecls: []usize = &.{}, // index into decls
pubDecls: []usize = &.{}, // index into decls
fields: []Expr = &.{}, // (use src->fields to find names)
- ast: usize,
},
Fn: struct {
name: []const u8,
@@ -582,7 +614,6 @@ const DocData = struct {
src: usize, // index into astNodes
privDecls: []usize = &.{}, // index into decls
pubDecls: []usize = &.{}, // index into decls
- ast: usize,
},
Frame: struct { name: []const u8 },
AnyFrame: struct { name: []const u8 },
@@ -601,14 +632,15 @@ const DocData = struct {
) !void {
const active_tag = std.meta.activeTag(self);
var jsw = std.json.writeStream(w, 15);
- try jsw.beginObject();
- try jsw.objectField("kind");
+ if (opts.whitespace) |ws| jsw.whitespace = ws;
+ try jsw.beginArray();
+ try jsw.arrayElem();
try jsw.emitNumber(@enumToInt(active_tag));
inline for (comptime std.meta.fields(Type)) |case| {
if (@field(Type, case.name) == active_tag) {
const current_value = @field(self, case.name);
inline for (comptime std.meta.fields(case.field_type)) |f| {
- try jsw.objectField(f.name);
+ try jsw.arrayElem();
if (f.field_type == std.builtin.TypeInfo.Pointer.Size) {
try jsw.emitNumber(@enumToInt(@field(current_value, f.name)));
} else {
@@ -618,7 +650,7 @@ const DocData = struct {
}
}
}
- try jsw.endObject();
+ try jsw.endArray();
}
};
@@ -686,7 +718,7 @@ const DocData = struct {
const SwitchOp = struct {
cond_index: usize,
file_name: []const u8,
- ast: usize,
+ src: usize,
outer_decl: usize, // index in `types`
};
const BuiltinBin = struct {
@@ -704,7 +736,15 @@ const DocData = struct {
end: ?usize = null,
sentinel: ?usize = null, // index in `exprs`
};
- const Cmpxchg = struct { name: []const u8, type: usize, ptr: usize, expected_value: usize, new_value: usize, success_order: usize, failure_order: usize };
+ const Cmpxchg = struct {
+ name: []const u8,
+ type: usize,
+ ptr: usize,
+ expected_value: usize,
+ new_value: usize,
+ success_order: usize,
+ failure_order: usize,
+ };
const As = struct {
typeRefArg: ?usize, // index in `exprs`
exprArg: usize, // index in `exprs`
@@ -721,11 +761,12 @@ const DocData = struct {
pub fn jsonStringify(
self: Expr,
- opt: std.json.StringifyOptions,
+ opts: std.json.StringifyOptions,
w: anytype,
) !void {
const active_tag = std.meta.activeTag(self);
var jsw = std.json.writeStream(w, 15);
+ if (opts.whitespace) |ws| jsw.whitespace = ws;
try jsw.beginObject();
try jsw.objectField(@tagName(active_tag));
switch (self) {
@@ -742,7 +783,7 @@ const DocData = struct {
if (comptime std.mem.eql(u8, case.name, "builtinField"))
continue;
if (@field(Expr, case.name) == active_tag) {
- try std.json.stringify(@field(self, case.name), opt, w);
+ try std.json.stringify(@field(self, case.name), opts, w);
jsw.state_index -= 1;
// TODO: we should not reach into the state of the
// json writer, but alas, this is what's
@@ -1874,7 +1915,12 @@ fn walkInstruction(
// log.debug("{s}", .{sep});
const switch_index = self.exprs.items.len;
- try self.exprs.append(self.arena, .{ .switchOp = .{ .cond_index = cond_index, .file_name = file.sub_file_path, .ast = ast_index, .outer_decl = type_index } });
+ try self.exprs.append(self.arena, .{ .switchOp = .{
+ .cond_index = cond_index,
+ .file_name = file.sub_file_path,
+ .src = ast_index,
+ .outer_decl = type_index,
+ } });
return DocData.WalkResult{
.typeRef = .{ .type = @enumToInt(Ref.type_type) },
@@ -2505,7 +2551,6 @@ fn walkInstruction(
.src = self_ast_node_index,
.privDecls = priv_decl_indexes.items,
.pubDecls = decl_indexes.items,
- .ast = self_ast_node_index,
},
};
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| {
@@ -2644,7 +2689,13 @@ fn walkInstruction(
self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items;
self.types.items[type_slot_index] = .{
- .Union = .{ .name = "todo_name", .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, .fields = field_type_refs.items, .ast = self_ast_node_index },
+ .Union = .{
+ .name = "todo_name",
+ .src = self_ast_node_index,
+ .privDecls = priv_decl_indexes.items,
+ .pubDecls = decl_indexes.items,
+ .fields = field_type_refs.items,
+ },
};
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| {
@@ -2796,7 +2847,12 @@ fn walkInstruction(
self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items;
self.types.items[type_slot_index] = .{
- .Enum = .{ .name = "todo_name", .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, .ast = self_ast_node_index },
+ .Enum = .{
+ .name = "todo_name",
+ .src = self_ast_node_index,
+ .privDecls = priv_decl_indexes.items,
+ .pubDecls = decl_indexes.items,
+ },
};
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| {
for (paths.items) |resume_info| {
@@ -2910,7 +2966,15 @@ fn walkInstruction(
self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items;
self.types.items[type_slot_index] = .{
- .Struct = .{ .name = "todo_name", .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, .fields = field_type_refs.items, .line_number = self.ast_nodes.items[self_ast_node_index].line, .outer_decl = type_slot_index - 1, .ast = self_ast_node_index },
+ .Struct = .{
+ .name = "todo_name",
+ .src = self_ast_node_index,
+ .privDecls = priv_decl_indexes.items,
+ .pubDecls = decl_indexes.items,
+ .fields = field_type_refs.items,
+ .line_number = self.ast_nodes.items[self_ast_node_index].line,
+ .outer_decl = type_slot_index - 1,
+ },
};
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| {
for (paths.items) |resume_info| {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 597f5cffff..5a1abcb52b 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1238,7 +1238,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
options.target,
options.is_native_abi,
link_libc,
- options.system_lib_names.len != 0 or options.frameworks.count() != 0,
options.libc_installation,
options.native_darwin_sdk != null,
);
@@ -4522,7 +4521,6 @@ fn detectLibCIncludeDirs(
target: Target,
is_native_abi: bool,
link_libc: bool,
- link_system_libs: bool,
libc_installation: ?*const LibCInstallation,
has_macos_sdk: bool,
) !LibCDirs {
@@ -4539,7 +4537,7 @@ fn detectLibCIncludeDirs(
// If linking system libraries and targeting the native abi, default to
// using the system libc installation.
- if (link_system_libs and is_native_abi and !target.isMinGW()) {
+ if (is_native_abi and !target.isMinGW()) {
if (target.isDarwin()) {
return if (has_macos_sdk)
// For Darwin/macOS, we are all set with getDarwinSDK found earlier.
@@ -4551,74 +4549,29 @@ fn detectLibCIncludeDirs(
getZigShippedLibCIncludeDirsDarwin(arena, zig_lib_dir, target);
}
const libc = try arena.create(LibCInstallation);
- libc.* = try LibCInstallation.findNative(.{ .allocator = arena, .verbose = true });
+ libc.* = LibCInstallation.findNative(.{ .allocator = arena }) catch |err| switch (err) {
+ error.CCompilerExitCode,
+ error.CCompilerCrashed,
+ error.CCompilerCannotFindHeaders,
+ error.UnableToSpawnCCompiler,
+ => |e| {
+ // We tried to integrate with the native system C compiler,
+ // however, it is not installed. So we must rely on our bundled
+ // libc files.
+ if (target_util.canBuildLibC(target)) {
+ return detectLibCFromBuilding(arena, zig_lib_dir, target, has_macos_sdk);
+ }
+ return e;
+ },
+ else => |e| return e,
+ };
return detectLibCFromLibCInstallation(arena, target, libc);
}
// If not linking system libraries, build and provide our own libc by
// default if possible.
if (target_util.canBuildLibC(target)) {
- switch (target.os.tag) {
- .macos => return if (has_macos_sdk)
- // For Darwin/macOS, we are all set with getDarwinSDK found earlier.
- LibCDirs{
- .libc_include_dir_list = &[0][]u8{},
- .libc_installation = null,
- }
- else
- getZigShippedLibCIncludeDirsDarwin(arena, zig_lib_dir, target),
- else => {
- const generic_name = target_util.libCGenericName(target);
- // Some architectures are handled by the same set of headers.
- const arch_name = if (target.abi.isMusl())
- musl.archName(target.cpu.arch)
- else if (target.cpu.arch.isThumb())
- // ARM headers are valid for Thumb too.
- switch (target.cpu.arch) {
- .thumb => "arm",
- .thumbeb => "armeb",
- else => unreachable,
- }
- else
- @tagName(target.cpu.arch);
- const os_name = @tagName(target.os.tag);
- // Musl's headers are ABI-agnostic and so they all have the "musl" ABI name.
- const abi_name = if (target.abi.isMusl()) "musl" else @tagName(target.abi);
- const s = std.fs.path.sep_str;
- const arch_include_dir = try std.fmt.allocPrint(
- arena,
- "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}",
- .{ zig_lib_dir, arch_name, os_name, abi_name },
- );
- const generic_include_dir = try std.fmt.allocPrint(
- arena,
- "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{s}",
- .{ zig_lib_dir, generic_name },
- );
- const generic_arch_name = target_util.osArchName(target);
- const arch_os_include_dir = try std.fmt.allocPrint(
- arena,
- "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-any",
- .{ zig_lib_dir, generic_arch_name, os_name },
- );
- const generic_os_include_dir = try std.fmt.allocPrint(
- arena,
- "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{s}-any",
- .{ zig_lib_dir, os_name },
- );
-
- const list = try arena.alloc([]const u8, 4);
- list[0] = arch_include_dir;
- list[1] = generic_include_dir;
- list[2] = arch_os_include_dir;
- list[3] = generic_os_include_dir;
-
- return LibCDirs{
- .libc_include_dir_list = list,
- .libc_installation = null,
- };
- },
- }
+ return detectLibCFromBuilding(arena, zig_lib_dir, target, has_macos_sdk);
}
// If zig can't build the libc for the target and we are targeting the
@@ -4677,6 +4630,75 @@ fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const
};
}
+fn detectLibCFromBuilding(
+ arena: Allocator,
+ zig_lib_dir: []const u8,
+ target: std.Target,
+ has_macos_sdk: bool,
+) !LibCDirs {
+ switch (target.os.tag) {
+ .macos => return if (has_macos_sdk)
+ // For Darwin/macOS, we are all set with getDarwinSDK found earlier.
+ LibCDirs{
+ .libc_include_dir_list = &[0][]u8{},
+ .libc_installation = null,
+ }
+ else
+ getZigShippedLibCIncludeDirsDarwin(arena, zig_lib_dir, target),
+ else => {
+ const generic_name = target_util.libCGenericName(target);
+ // Some architectures are handled by the same set of headers.
+ const arch_name = if (target.abi.isMusl())
+ musl.archName(target.cpu.arch)
+ else if (target.cpu.arch.isThumb())
+ // ARM headers are valid for Thumb too.
+ switch (target.cpu.arch) {
+ .thumb => "arm",
+ .thumbeb => "armeb",
+ else => unreachable,
+ }
+ else
+ @tagName(target.cpu.arch);
+ const os_name = @tagName(target.os.tag);
+ // Musl's headers are ABI-agnostic and so they all have the "musl" ABI name.
+ const abi_name = if (target.abi.isMusl()) "musl" else @tagName(target.abi);
+ const s = std.fs.path.sep_str;
+ const arch_include_dir = try std.fmt.allocPrint(
+ arena,
+ "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}",
+ .{ zig_lib_dir, arch_name, os_name, abi_name },
+ );
+ const generic_include_dir = try std.fmt.allocPrint(
+ arena,
+ "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{s}",
+ .{ zig_lib_dir, generic_name },
+ );
+ const generic_arch_name = target_util.osArchName(target);
+ const arch_os_include_dir = try std.fmt.allocPrint(
+ arena,
+ "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-any",
+ .{ zig_lib_dir, generic_arch_name, os_name },
+ );
+ const generic_os_include_dir = try std.fmt.allocPrint(
+ arena,
+ "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{s}-any",
+ .{ zig_lib_dir, os_name },
+ );
+
+ const list = try arena.alloc([]const u8, 4);
+ list[0] = arch_include_dir;
+ list[1] = generic_include_dir;
+ list[2] = arch_os_include_dir;
+ list[3] = generic_os_include_dir;
+
+ return LibCDirs{
+ .libc_include_dir_list = list,
+ .libc_installation = null,
+ };
+ },
+ }
+}
+
pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
if (comp.wantBuildGLibCFromSource() or
comp.wantBuildMuslFromSource() or
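Net effect of the Compilation.zig changes: the inline include-path computation moves into the new detectLibCFromBuilding, and a findNative failure caused by a missing or broken system C compiler now falls back to Zig's bundled headers instead of surfacing as a hard error. A minimal sketch of that fallback shape, with hypothetical stand-ins for LibCInstallation.findNative and detectLibCFromBuilding:

const std = @import("std");

const FindError = error{
    UnableToSpawnCCompiler,
    CCompilerCrashed,
    CCompilerExitCode,
    OutOfMemory,
};

// Hypothetical stand-in for LibCInstallation.findNative.
fn findNativeIncludeDir() FindError![]const u8 {
    return error.UnableToSpawnCCompiler; // pretend no C compiler is installed
}

// Hypothetical stand-in for detectLibCFromBuilding.
fn bundledIncludeDir() []const u8 {
    return "lib/libc/include/generic-musl";
}

pub fn main() !void {
    const dir = findNativeIncludeDir() catch |err| switch (err) {
        // Integration with the native C compiler failed: fall back to the
        // headers Zig ships rather than reporting an error.
        error.UnableToSpawnCCompiler,
        error.CCompilerCrashed,
        error.CCompilerExitCode,
        => bundledIncludeDir(),
        else => |e| return e, // anything else still propagates
    };
    std.debug.print("libc include dir: {s}\n", .{dir});
}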
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 884fd68d55..a54c8e059c 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -139,21 +139,10 @@ const MCValue = union(enum) {
/// If the type is a pointer, it means the pointer address is at
/// this memory location.
memory: u64,
- /// The value is in memory referenced indirectly via a GOT entry
- /// index.
- ///
- /// If the type is a pointer, it means the pointer is referenced
- /// indirectly via GOT. When lowered, linker will emit
- /// relocations of type ARM64_RELOC_GOT_LOAD_PAGE21 and
- /// ARM64_RELOC_GOT_LOAD_PAGEOFF12.
- got_load: u32,
- /// The value is in memory referenced directly via symbol index.
- ///
- /// If the type is a pointer, it means the pointer is referenced
- /// directly via symbol index. When lowered, linker will emit a
- /// relocation of type ARM64_RELOC_PAGE21 and
- /// ARM64_RELOC_PAGEOFF12.
- direct_load: u32,
+ /// The value is in memory but requires a linker relocation fixup:
+ /// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
+ /// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
+ linker_load: struct { @"type": enum { got, direct }, sym_index: u32 },
/// The value is one of the stack variables.
///
/// If the type is a pointer, it means the pointer address is in
@@ -2959,8 +2948,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.memory,
.stack_offset,
.stack_argument_offset,
- .got_load,
- .direct_load,
+ .linker_load,
=> {
const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.load(dst_mcv, .{ .register = addr_reg }, ptr_ty);
@@ -3197,8 +3185,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.memory,
.stack_offset,
.stack_argument_offset,
- .got_load,
- .direct_load,
+ .linker_load,
=> {
const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.store(.{ .register = addr_reg }, value, ptr_ty, value_ty);
@@ -3493,7 +3480,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.u64), .x30, .{
- .got_load = fn_owner_decl.link.macho.sym_index,
+ .linker_load = .{
+ .@"type" = .got,
+ .sym_index = fn_owner_decl.link.macho.sym_index,
+ },
});
// blr x30
_ = try self.addInst(.{
@@ -4427,8 +4417,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.register = cond_reg,
});
},
- .got_load,
- .direct_load,
+ .linker_load,
.memory,
.stack_argument_offset,
.stack_offset,
@@ -4479,13 +4468,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
});
},
.memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = addr }),
- .got_load,
- .direct_load,
- => |sym_index| {
- const tag: Mir.Inst.Tag = switch (mcv) {
- .got_load => .load_memory_ptr_got,
- .direct_load => .load_memory_ptr_direct,
- else => unreachable,
+ .linker_load => |load_struct| {
+ const tag: Mir.Inst.Tag = switch (load_struct.@"type") {
+ .got => .load_memory_ptr_got,
+ .direct => .load_memory_ptr_direct,
};
const mod = self.bin_file.options.module.?;
_ = try self.addInst(.{
@@ -4494,7 +4480,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.payload = try self.addExtra(Mir.LoadMemoryPie{
.register = @enumToInt(src_reg),
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
- .sym_index = sym_index,
+ .sym_index = load_struct.sym_index,
}),
},
});
@@ -4594,13 +4580,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
},
.register_with_overflow => unreachable, // doesn't fit into a register
- .got_load,
- .direct_load,
- => |sym_index| {
- const tag: Mir.Inst.Tag = switch (mcv) {
- .got_load => .load_memory_got,
- .direct_load => .load_memory_direct,
- else => unreachable,
+ .linker_load => |load_struct| {
+ const tag: Mir.Inst.Tag = switch (load_struct.@"type") {
+ .got => .load_memory_got,
+ .direct => .load_memory_direct,
};
const mod = self.bin_file.options.module.?;
_ = try self.addInst(.{
@@ -4609,7 +4592,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.payload = try self.addExtra(Mir.LoadMemoryPie{
.register = @enumToInt(reg),
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
- .sym_index = sym_index,
+ .sym_index = load_struct.sym_index,
}),
},
});
@@ -4741,8 +4724,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.register_with_overflow => {
return self.fail("TODO implement genSetStackArgument {}", .{mcv});
},
- .got_load,
- .direct_load,
+ .linker_load,
.memory,
.stack_argument_offset,
.stack_offset,
@@ -4785,13 +4767,10 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
});
},
.memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
- .got_load,
- .direct_load,
- => |sym_index| {
- const tag: Mir.Inst.Tag = switch (mcv) {
- .got_load => .load_memory_ptr_got,
- .direct_load => .load_memory_ptr_direct,
- else => unreachable,
+ .linker_load => |load_struct| {
+ const tag: Mir.Inst.Tag = switch (load_struct.@"type") {
+ .got => .load_memory_ptr_got,
+ .direct => .load_memory_ptr_direct,
};
const mod = self.bin_file.options.module.?;
_ = try self.addInst(.{
@@ -4800,7 +4779,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.payload = try self.addExtra(Mir.LoadMemoryPie{
.register = @enumToInt(src_reg),
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
- .sym_index = sym_index,
+ .sym_index = load_struct.sym_index,
}),
},
});
@@ -5107,7 +5086,10 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
// Because MachO is PIE-always-on, we defer memory address resolution until
// the linker has enough info to perform relocations.
assert(decl.link.macho.sym_index != 0);
- return MCValue{ .got_load = decl.link.macho.sym_index };
+ return MCValue{ .linker_load = .{
+ .@"type" = .got,
+ .sym_index = decl.link.macho.sym_index,
+ } };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
@@ -5129,7 +5111,10 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
return MCValue{ .memory = vaddr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .direct_load = local_sym_index };
+ return MCValue{ .linker_load = .{
+ .@"type" = .direct,
+ .sym_index = local_sym_index,
+ } };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO lower unnamed const in COFF", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |_| {
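The aarch64 backend collapses the got_load and direct_load MCValue arms into one linker_load arm whose payload names the relocation flavor, so consumers switch once on the payload instead of duplicating prongs and carrying an `else => unreachable`. A minimal sketch of the shape (surrounding types hypothetical):

const std = @import("std");

const MCValue = union(enum) {
    immediate: u64,
    linker_load: struct {
        @"type": enum { got, direct },
        sym_index: u32,
    },
};

pub fn main() void {
    const mcv = MCValue{ .linker_load = .{ .@"type" = .got, .sym_index = 42 } };
    switch (mcv) {
        // One prong handles both flavors; the inner switch is exhaustive.
        .linker_load => |load| switch (load.@"type") {
            .got => std.debug.print("GOT load of sym {d}\n", .{load.sym_index}),
            .direct => std.debug.print("direct load of sym {d}\n", .{load.sym_index}),
        },
        .immediate => {},
    }
}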
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 00a2ff380a..9e243a3f86 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -681,12 +681,10 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
};
// Add relocation to the decl.
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
+ const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
- .target = .{
- .sym_index = relocation.sym_index,
- .file = null,
- },
+ .target = target,
.addend = 0,
.subtractor = null,
.pcrel = true,
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index cefcf3b114..0eeb7a7ded 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -438,9 +438,8 @@ fn gen(self: *Self) !void {
// mov fp, sp
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = .fp,
- .rn = .r0,
.op = Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none),
} },
});
@@ -452,9 +451,7 @@ fn gen(self: *Self) !void {
// The address of where to store the return value is in
// r0. As this register might get overwritten along the
// way, save the address to the stack.
- const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, 4) + 4;
- self.next_stack_offset = stack_offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ const stack_offset = try self.allocMem(4, 4, null);
try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 });
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
@@ -491,14 +488,10 @@ fn gen(self: *Self) !void {
const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align);
const stack_size = aligned_total_stack_end - self.saved_regs_stack_space;
self.max_end_stack = stack_size;
- if (Instruction.Operand.fromU32(stack_size)) |op| {
- self.mir_instructions.set(sub_reloc, .{
- .tag = .sub,
- .data = .{ .rr_op = .{ .rd = .sp, .rn = .sp, .op = op } },
- });
- } else {
- return self.failSymbol("TODO ARM: allow larger stacks", .{});
- }
+ self.mir_instructions.set(sub_reloc, .{
+ .tag = .sub_sp_scratch_r0,
+ .data = .{ .imm32 = stack_size },
+ });
_ = try self.addInst(.{
.tag = .dbg_epilogue_begin,
@@ -531,9 +524,8 @@ fn gen(self: *Self) !void {
// mov sp, fp
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = .sp,
- .rn = .r0,
.op = Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none),
} },
});
@@ -895,17 +887,30 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
try table.ensureUnusedCapacity(self.gpa, additional_count);
}
-fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
+fn allocMem(
+ self: *Self,
+ abi_size: u32,
+ abi_align: u32,
+ maybe_inst: ?Air.Inst.Index,
+) !u32 {
+ assert(abi_size > 0);
+ assert(abi_align > 0);
+
if (abi_align > self.stack_align)
self.stack_align = abi_align;
+
// TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
self.next_stack_offset = offset;
self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
- try self.stack.putNoClobber(self.gpa, offset, .{
- .inst = inst,
- .size = abi_size,
- });
+
+ if (maybe_inst) |inst| {
+ try self.stack.putNoClobber(self.gpa, offset, .{
+ .inst = inst,
+ .size = abi_size,
+ });
+ }
+
return offset;
}
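allocMem now takes the instruction last and makes it optional: every caller gets a stack slot, but only slots owned by an AIR instruction are recorded in the stack map; the prologue's saved return-value address above is the first anonymous user. A simplified sketch of the bookkeeping, with hypothetical types and the 2022-era std.mem helper:

const std = @import("std");

const Frame = struct {
    next_stack_offset: u32 = 0,
    max_end_stack: u32 = 0,

    fn allocMem(self: *Frame, abi_size: u32, abi_align: u32, maybe_inst: ?u32) u32 {
        std.debug.assert(abi_size > 0);
        std.debug.assert(abi_align > 0);
        const offset = std.mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
        self.next_stack_offset = offset;
        if (offset > self.max_end_stack) self.max_end_stack = offset;
        if (maybe_inst) |inst| {
            // The real code records .{ .inst = inst, .size = abi_size } in
            // the `stack` map; anonymous slots skip the bookkeeping.
            _ = inst;
        }
        return offset;
    }
};

pub fn main() void {
    var frame = Frame{};
    const ret_slot = frame.allocMem(4, 4, null); // anonymous slot
    std.debug.print("saved return address at offset {d}\n", .{ret_slot});
}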
@@ -927,35 +932,34 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
- return self.allocMem(inst, abi_size, abi_align);
+
+ return self.allocMem(abi_size, abi_align, inst);
}
-fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
- const elem_ty = self.air.typeOfIndex(inst);
+fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
- if (abi_align > self.stack_align)
- self.stack_align = abi_align;
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (abi_size <= ptr_bytes) {
- if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
+ if (self.register_manager.tryAllocReg(maybe_inst, gp)) |reg| {
return MCValue{ .register = reg };
}
}
}
- const stack_offset = try self.allocMem(inst, abi_size, abi_align);
+
+ const stack_offset = try self.allocMem(abi_size, abi_align, maybe_inst);
return MCValue{ .stack_offset = stack_offset };
}
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
- const stack_mcv = try self.allocRegOrMem(inst, false);
+ const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst);
log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv });
const reg_mcv = self.getResolvedInstValue(inst);
@@ -976,12 +980,13 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
/// occupied
fn spillCompareFlagsIfOccupied(self: *Self) !void {
if (self.cpsr_flags_inst) |inst_to_save| {
+ const ty = self.air.typeOfIndex(inst_to_save);
const mcv = self.getResolvedInstValue(inst_to_save);
const new_mcv = switch (mcv) {
- .cpsr_flags => try self.allocRegOrMem(inst_to_save, true),
+ .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save),
.register_c_flag,
.register_v_flag,
- => try self.allocRegOrMem(inst_to_save, false),
+ => try self.allocRegOrMem(ty, false, inst_to_save),
else => unreachable, // mcv doesn't occupy the compare flags
};
@@ -1112,10 +1117,11 @@ fn truncRegister(
});
}
+/// Asserts that both operand_ty and dest_ty are integer types
fn trunc(
self: *Self,
maybe_inst: ?Air.Inst.Index,
- operand: MCValue,
+ operand_bind: ReadArg.Bind,
operand_ty: Type,
dest_ty: Type,
) !MCValue {
@@ -1123,39 +1129,38 @@ fn trunc(
const info_b = dest_ty.intInfo(self.target.*);
if (info_b.bits <= 32) {
- const operand_reg = switch (operand) {
- .register => |r| r,
- else => operand_reg: {
- if (info_a.bits <= 32) {
- break :operand_reg try self.copyToTmpRegister(operand_ty, operand);
- } else {
- return self.fail("TODO load least significant word into register", .{});
- }
- },
- };
- const operand_reg_lock = self.register_manager.lockReg(operand_reg);
- defer if (operand_reg_lock) |reg| self.register_manager.unlockReg(reg);
+ if (info_a.bits > 32) {
+ return self.fail("TODO load least significant word into register", .{});
+ }
- const dest_reg = if (maybe_inst) |inst| blk: {
- const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ var operand_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
- break :blk operand_reg;
- } else {
- break :blk try self.register_manager.allocReg(inst, gp);
- }
- } else try self.register_manager.allocReg(null, gp);
+ const read_args = [_]ReadArg{
+ .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &operand_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = dest_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ } else null,
+ );
switch (info_b.bits) {
32 => {
try self.genSetReg(operand_ty, dest_reg, .{ .register = operand_reg });
- return MCValue{ .register = dest_reg };
},
else => {
try self.truncRegister(operand_reg, dest_reg, info_b.signedness, info_b.bits);
- return MCValue{ .register = dest_reg };
},
}
+
+ return MCValue{ .register = dest_reg };
} else {
return self.fail("TODO: truncate to ints > 32 bits", .{});
}
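trunc is the first of many call sites converted to the allocRegs convention: a ReadArg carries a type, a bind (where the value currently lives), a register class, and an out-pointer for the chosen register; a WriteArg describes a result the same way; optional ReuseMetadata maps operand positions onto the owning instruction so a dying operand's register can be reused as the destination. A toy sketch of the out-pointer mechanics only, with hypothetical simplified types (the real helper consults the register manager and handles locking and reuse):

const std = @import("std");

const Register = enum(u2) { r0, r1, r2, r3 };

const ReadArg = struct { reg: *Register };
const WriteArg = struct { reg: *Register };

fn allocRegs(reads: []const ReadArg, writes: []const WriteArg) void {
    // Toy allocator: hand out registers in order (at most four here).
    var next: u8 = 0;
    for (reads) |arg| {
        arg.reg.* = @intToEnum(Register, @intCast(u2, next));
        next += 1;
    }
    for (writes) |arg| {
        arg.reg.* = @intToEnum(Register, @intCast(u2, next));
        next += 1;
    }
}

pub fn main() void {
    var operand_reg: Register = undefined;
    var dest_reg: Register = undefined;
    allocRegs(
        &[_]ReadArg{.{ .reg = &operand_reg }},
        &[_]WriteArg{.{ .reg = &dest_reg }},
    );
    std.debug.print("operand={s} dest={s}\n", .{ @tagName(operand_reg), @tagName(dest_reg) });
}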
@@ -1163,12 +1168,12 @@ fn trunc(
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand = try self.resolveInst(ty_op.operand);
+ const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const operand_ty = self.air.typeOf(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
- break :blk try self.trunc(inst, operand, operand_ty, dest_ty);
+ break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1184,29 +1189,32 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(ty_op.operand);
+ const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const operand_ty = self.air.typeOf(ty_op.operand);
- switch (operand) {
+ switch (try operand_bind.resolveToMcv(self)) {
.dead => unreachable,
.unreach => unreachable,
.cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
else => {
switch (operand_ty.zigTypeTag()) {
.Bool => {
- const op_reg = switch (operand) {
- .register => |r| r,
- else => try self.copyToTmpRegister(operand_ty, operand),
- };
- const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
- defer self.register_manager.unlockReg(op_reg_lock);
-
- const dest_reg = blk: {
- if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
- break :blk op_reg;
- }
+ var op_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- break :blk try self.register_manager.allocReg(null, gp);
+ const read_args = [_]ReadArg{
+ .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &op_reg },
};
+ const write_args = [_]WriteArg{
+ .{ .ty = operand_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ ReuseMetadata{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ },
+ );
_ = try self.addInst(.{
.tag = .eor,
@@ -1223,26 +1231,28 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
.Int => {
const int_info = operand_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
- const op_reg = switch (operand) {
- .register => |r| r,
- else => try self.copyToTmpRegister(operand_ty, operand),
- };
- const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
- defer self.register_manager.unlockReg(op_reg_lock);
+ var op_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- const dest_reg = blk: {
- if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
- break :blk op_reg;
- }
-
- break :blk try self.register_manager.allocReg(null, gp);
+ const read_args = [_]ReadArg{
+ .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &op_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = operand_ty, .bind = .none, .class = gp, .reg = &dest_reg },
};
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ ReuseMetadata{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ },
+ );
_ = try self.addInst(.{
.tag = .mvn,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = dest_reg,
- .rn = undefined,
.op = Instruction.Operand.reg(op_reg, Instruction.Operand.Shift.none),
} },
});
@@ -1267,11 +1277,11 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
fn minMax(
self: *Self,
tag: Air.Inst.Tag,
- maybe_inst: ?Air.Inst.Index,
- lhs: MCValue,
- rhs: MCValue,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
lhs_ty: Type,
rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
) !MCValue {
switch (lhs_ty.zigTypeTag()) {
.Float => return self.fail("TODO ARM min/max on floats", .{}),
@@ -1281,34 +1291,25 @@ fn minMax(
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
- const lhs_is_register = lhs == .register;
- const rhs_is_register = rhs == .register;
+ var lhs_reg: Register = undefined;
+ var rhs_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- const lhs_reg = switch (lhs) {
- .register => |r| r,
- else => try self.copyToTmpRegister(lhs_ty, lhs),
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
+ .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg },
};
- const lhs_reg_lock = self.register_manager.lockReg(lhs_reg);
- defer if (lhs_reg_lock) |reg| self.register_manager.unlockReg(reg);
-
- const rhs_reg = switch (rhs) {
- .register => |r| r,
- else => try self.copyToTmpRegister(rhs_ty, rhs),
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
};
- const rhs_reg_lock = self.register_manager.lockReg(rhs_reg);
- defer if (rhs_reg_lock) |reg| self.register_manager.unlockReg(reg);
-
- const dest_reg = if (maybe_inst) |inst| blk: {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-
- if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
- break :blk lhs_reg;
- } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
- break :blk rhs_reg;
- } else {
- break :blk try self.register_manager.allocReg(inst, gp);
- }
- } else try self.register_manager.allocReg(null, gp);
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{ 0, 1 },
+ } else null,
+ );
// lhs == reg should have been checked by airMinMax
//
@@ -1318,7 +1319,13 @@ fn minMax(
// register.
assert(lhs_reg != rhs_reg); // see note above
- _ = try self.binOpRegister(.cmp, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty, null);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = lhs_reg,
+ .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none),
+ } },
+ });
const cond_choose_lhs: Condition = switch (tag) {
.max => switch (int_info.signedness) {
@@ -1337,9 +1344,8 @@ fn minMax(
_ = try self.addInst(.{
.tag = .mov,
.cond = cond_choose_lhs,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = dest_reg,
- .rn = .r0,
.op = Instruction.Operand.reg(lhs_reg, Instruction.Operand.Shift.none),
} },
});
@@ -1348,9 +1354,8 @@ fn minMax(
_ = try self.addInst(.{
.tag = .mov,
.cond = cond_choose_rhs,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = dest_reg,
- .rn = .r0,
.op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none),
} },
});
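A pattern repeated throughout this file: ARM mov never reads rn and cmp never writes rd, so the old three-field rr_op payload, which forced callers to stuff a dummy .r0 or undefined into the unused slot, is split into dedicated r_op_mov and r_op_cmp variants. A minimal sketch of the Mir data shape (field types hypothetical):

const std = @import("std");

const Data = union(enum) {
    rr_op: struct { rd: u4, rn: u4, op: u12 }, // genuine three-operand forms
    r_op_mov: struct { rd: u4, op: u12 }, // mov rd, op (no rn)
    r_op_cmp: struct { rn: u4, op: u12 }, // cmp rn, op (no rd)
};

pub fn main() void {
    // Before: .rr_op with a meaningless .rn = .r0; after: no dead field.
    const inst = Data{ .r_op_mov = .{ .rd = 11, .op = 13 } }; // mov fp, sp
    std.debug.print("{s}\n", .{@tagName(inst)});
}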
@@ -1368,15 +1373,17 @@ fn minMax(
fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
+ const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+
+ const lhs = try self.resolveInst(bin_op.lhs);
if (bin_op.lhs == bin_op.rhs) break :result lhs;
- break :result try self.minMax(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
+ break :result try self.minMax(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst);
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1390,7 +1397,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const len = try self.resolveInst(bin_op.rhs);
const len_ty = self.air.typeOf(bin_op.rhs);
- const stack_offset = try self.allocMem(inst, 8, 4);
+ const stack_offset = try self.allocMem(8, 4, inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(len_ty, stack_offset - 4, len);
break :result MCValue{ .stack_offset = stack_offset };
@@ -1400,38 +1407,65 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
- .lhs = bin_op.lhs,
- .rhs = bin_op.rhs,
- .inst = inst,
- });
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
+ const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+
+ break :result switch (tag) {
+ .add => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .sub => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .mul => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .div_float => try self.divFloat(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .div_trunc => try self.div(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .div_floor => try self.div(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .div_exact => try self.divExact(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .rem => try self.rem(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .addwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .subwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .mulwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .bit_and => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .bit_or => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .xor => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .shl_exact => try self.shiftExact(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .shr_exact => try self.shiftExact(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .shl => try self.shiftNormal(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .shr => try self.shiftNormal(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ .bool_and => try self.booleanOp(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .bool_or => try self.booleanOp(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+
+ else => unreachable,
+ };
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
- .lhs = bin_op.lhs,
- .rhs = bin_op.rhs,
- .inst = inst,
- });
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
+ const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+
+ break :result try self.ptrArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst);
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
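airBinOp stops funneling every operation through one monolithic binOp and instead dispatches each AIR tag to a focused helper sharing the (lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst) signature. A toy sketch of the dispatch style, with hypothetical helpers:

const std = @import("std");

const Tag = enum { add, sub, mul, bit_and, bit_or };

fn addSub(tag: Tag, lhs: i32, rhs: i32) i32 {
    return switch (tag) {
        .add => lhs + rhs,
        .sub => lhs - rhs,
        else => unreachable, // only reached through binOp's routing
    };
}

fn mul(lhs: i32, rhs: i32) i32 {
    return lhs * rhs;
}

fn bitwise(tag: Tag, lhs: i32, rhs: i32) i32 {
    return switch (tag) {
        .bit_and => lhs & rhs,
        .bit_or => lhs | rhs,
        else => unreachable,
    };
}

fn binOp(tag: Tag, lhs: i32, rhs: i32) i32 {
    // Helpers that serve several tags take the tag as a parameter, the
    // same shape as addSub/bitwise in the diff above.
    return switch (tag) {
        .add, .sub => addSub(tag, lhs, rhs),
        .mul => mul(lhs, rhs),
        .bit_and, .bit_or => bitwise(tag, lhs, rhs),
    };
}

pub fn main() void {
    std.debug.print("{d}\n", .{binOp(.add, 2, 3)});
}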
@@ -1458,8 +1492,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const lhs = try self.resolveInst(extra.lhs);
- const rhs = try self.resolveInst(extra.rhs);
+ const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
+ const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
@@ -1475,17 +1509,16 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits < 32) {
- const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+ const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
try self.spillCompareFlagsIfOccupied();
- self.cpsr_flags_inst = null;
const base_tag: Air.Inst.Tag = switch (tag) {
.add_with_overflow => .add,
.sub_with_overflow => .sub,
else => unreachable,
};
- const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
+ const dest = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@@ -1498,25 +1531,34 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
// cmp dest, truncated
- _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = dest_reg,
+ .op = Instruction.Operand.reg(truncated_reg, Instruction.Operand.Shift.none),
+ } },
+ });
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else if (int_info.bits == 32) {
+ const lhs_immediate = try lhs_bind.resolveToImmediate(self);
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
+
// Only say yes if the operation is
// commutative, i.e. we can swap both of the
// operands
const lhs_immediate_ok = switch (tag) {
- .add_with_overflow => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null,
+ .add_with_overflow => if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false,
.sub_with_overflow => false,
else => unreachable,
};
const rhs_immediate_ok = switch (tag) {
.add_with_overflow,
.sub_with_overflow,
- => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null,
+ => if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false,
else => unreachable,
};
@@ -1531,12 +1573,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dest = blk: {
if (rhs_immediate_ok) {
- break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null);
+ break :blk try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, null);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
- break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null);
+ break :blk try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, null);
} else {
- break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null);
+ break :blk try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null);
}
};
@@ -1563,8 +1605,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
const result: MCValue = result: {
- const lhs = try self.resolveInst(extra.lhs);
- const rhs = try self.resolveInst(extra.rhs);
+ const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
+ const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
@@ -1580,17 +1622,16 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 16) {
- const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+ const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
try self.spillCompareFlagsIfOccupied();
- self.cpsr_flags_inst = null;
const base_tag: Mir.Inst.Tag = switch (int_info.signedness) {
.signed => .smulbb,
.unsigned => .mul,
};
- const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
+ const dest = try self.binOpRegisterNew(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@@ -1603,62 +1644,48 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
// cmp dest, truncated
- _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = dest_reg,
+ .op = Instruction.Operand.reg(truncated_reg, Instruction.Operand.Shift.none),
+ } },
+ });
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else if (int_info.bits <= 32) {
- const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+ const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
try self.spillCompareFlagsIfOccupied();
- self.cpsr_flags_inst = null;
const base_tag: Mir.Inst.Tag = switch (int_info.signedness) {
.signed => .smull,
.unsigned => .umull,
};
- // TODO extract umull etc. to binOpTwoRegister
- // once MCValue.rr is implemented
- const lhs_is_register = lhs == .register;
- const rhs_is_register = rhs == .register;
-
- const lhs_lock: ?RegisterLock = if (lhs_is_register)
- self.register_manager.lockReg(lhs.register)
- else
- null;
- defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const lhs_reg = if (lhs_is_register)
- lhs.register
- else
- try self.register_manager.allocReg(null, gp);
- const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
- defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const rhs_reg = if (rhs_is_register)
- rhs.register
- else
- try self.register_manager.allocReg(null, gp);
- const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
- defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const dest_regs = try self.register_manager.allocRegs(2, .{ null, null }, gp);
- const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs);
- defer for (dest_regs_locks) |reg| {
- self.register_manager.unlockReg(reg);
- };
- const rdlo = dest_regs[0];
- const rdhi = dest_regs[1];
-
- if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
- if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+ var lhs_reg: Register = undefined;
+ var rhs_reg: Register = undefined;
+ var rdhi: Register = undefined;
+ var rdlo: Register = undefined;
+ var truncated_reg: Register = undefined;
- const truncated_reg = try self.register_manager.allocReg(null, gp);
- const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
- defer self.register_manager.unlockReg(truncated_reg_lock);
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
+ .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &rdhi },
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &rdlo },
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &truncated_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ null,
+ );
_ = try self.addInst(.{
.tag = base_tag,
@@ -1677,14 +1704,19 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
// cmp truncated, rdlo
- _ = try self.binOp(.cmp_eq, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize, null);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = truncated_reg,
+ .op = Instruction.Operand.reg(rdlo, Instruction.Operand.Shift.none),
+ } },
+ });
// mov rdlo, #0
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = rdlo,
- .rn = .r0,
.op = Instruction.Operand.fromU32(0).?,
} },
});
@@ -1693,23 +1725,27 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.addInst(.{
.tag = .mov,
.cond = .ne,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = rdlo,
- .rn = .r0,
.op = Instruction.Operand.fromU32(1).?,
} },
});
// cmp rdhi, #0
- _ = try self.binOp(.cmp_eq, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize, null);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = rdhi,
+ .op = Instruction.Operand.fromU32(0).?,
+ } },
+ });
// movne rdlo, #1
_ = try self.addInst(.{
.tag = .mov,
.cond = .ne,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = rdlo,
- .rn = .r0,
.op = Instruction.Operand.fromU32(1).?,
} },
});
@@ -1733,8 +1769,6 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
const result: MCValue = result: {
- const lhs = try self.resolveInst(extra.lhs);
- const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
@@ -1748,30 +1782,109 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.Int => {
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
- const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
-
- const lhs_lock: ?RegisterLock = if (lhs == .register)
- self.register_manager.lockRegAssumeUnused(lhs.register)
- else
- null;
- defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+ const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
try self.spillCompareFlagsIfOccupied();
- self.cpsr_flags_inst = null;
- // lsl dest, lhs, rhs
- const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null);
- const dest_reg = dest.register;
- const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
- defer self.register_manager.unlockReg(dest_lock);
+ const shr_mir_tag: Mir.Inst.Tag = switch (int_info.signedness) {
+ .signed => Mir.Inst.Tag.asr,
+ .unsigned => Mir.Inst.Tag.lsr,
+ };
- // asr/lsr reconstructed, dest, rhs
- const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null);
+ var lhs_reg: Register = undefined;
+ var rhs_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+ var reconstructed_reg: Register = undefined;
+
+ const rhs_mcv = try self.resolveInst(extra.rhs);
+ const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null;
+
+ const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
+ const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
+
+ if (rhs_immediate_ok) {
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &reconstructed_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ null,
+ );
+
+ // lsl dest, lhs, rhs
+ _ = try self.addInst(.{
+ .tag = .lsl,
+ .data = .{ .rr_shift = .{
+ .rd = dest_reg,
+ .rm = lhs_reg,
+ .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)),
+ } },
+ });
+
+ try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits);
+
+ // asr/lsr reconstructed, dest, rhs
+ _ = try self.addInst(.{
+ .tag = shr_mir_tag,
+ .data = .{ .rr_shift = .{
+ .rd = reconstructed_reg,
+ .rm = dest_reg,
+ .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)),
+ } },
+ });
+ } else {
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
+ .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &reconstructed_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ null,
+ );
+
+ // lsl dest, lhs, rhs
+ _ = try self.addInst(.{
+ .tag = .lsl,
+ .data = .{ .rr_shift = .{
+ .rd = dest_reg,
+ .rm = lhs_reg,
+ .shift_amount = Instruction.ShiftAmount.reg(rhs_reg),
+ } },
+ });
+
+ try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits);
+
+ // asr/lsr reconstructed, dest, rhs
+ _ = try self.addInst(.{
+ .tag = shr_mir_tag,
+ .data = .{ .rr_shift = .{
+ .rd = reconstructed_reg,
+ .rm = dest_reg,
+ .shift_amount = Instruction.ShiftAmount.reg(rhs_reg),
+ } },
+ });
+ }
// cmp lhs, reconstructed
- _ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = lhs_reg,
+ .op = Instruction.Operand.reg(reconstructed_reg, Instruction.Operand.Shift.none),
+ } },
+ });
- try self.genSetStack(lhs_ty, stack_offset, dest);
+ try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
@@ -1826,19 +1939,57 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
}
/// Given an error union, returns the error
-fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+fn errUnionErr(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) !MCValue {
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 };
}
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- return error_union_mcv;
+ return try error_union_bind.resolveToMcv(self);
}
const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
- switch (error_union_mcv) {
- .register => return self.fail("TODO errUnionErr for registers", .{}),
+ switch (try error_union_bind.resolveToMcv(self)) {
+ .register => {
+ var operand_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+
+ const read_args = [_]ReadArg{
+ .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ } else null,
+ );
+
+ const err_bit_offset = err_offset * 8;
+ const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+
+ _ = try self.addInst(.{
+ .tag = .ubfx, // errors are unsigned integers
+ .data = .{ .rr_lsb_width = .{
+ .rd = dest_reg,
+ .rn = operand_reg,
+ .lsb = @intCast(u5, err_bit_offset),
+ .width = @intCast(u6, err_bit_size),
+ } },
+ });
+
+ return MCValue{ .register = dest_reg };
+ },
.stack_argument_offset => |off| {
return MCValue{ .stack_argument_offset = off + err_offset };
},
@@ -1855,27 +2006,66 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const error_union_ty = self.air.typeOf(ty_op.operand);
- const mcv = try self.resolveInst(ty_op.operand);
- break :result try self.errUnionErr(mcv, error_union_ty);
+
+ break :result try self.errUnionErr(error_union_bind, error_union_ty, inst);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
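+
+// For example, when the error union lives in a register and the 16-bit error
+// set (the default anyerror size) sits at byte offset 0, the register path of
+// errUnionErr above reduces to a single unsigned bitfield extract:
+//
+//     ubfx rd, rn, #0, #16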
/// Given an error union, returns the payload
-fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+fn errUnionPayload(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) !MCValue {
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
- return error_union_mcv;
+ return try error_union_bind.resolveToMcv(self);
}
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
return MCValue.none;
}
const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
- switch (error_union_mcv) {
- .register => return self.fail("TODO errUnionPayload for registers", .{}),
+ switch (try error_union_bind.resolveToMcv(self)) {
+ .register => {
+ var operand_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+
+ const read_args = [_]ReadArg{
+ .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = payload_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ } else null,
+ );
+
+ const payload_bit_offset = payload_offset * 8;
+ const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+
+ _ = try self.addInst(.{
+ .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+ .data = .{ .rr_lsb_width = .{
+ .rd = dest_reg,
+ .rn = operand_reg,
+ .lsb = @intCast(u5, payload_bit_offset),
+ .width = @intCast(u6, payload_bit_size),
+ } },
+ });
+
+ return MCValue{ .register = dest_reg };
+ },
.stack_argument_offset => |off| {
return MCValue{ .stack_argument_offset = off + payload_offset };
},
@@ -1892,9 +2082,10 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const error_union_ty = self.air.typeOf(ty_op.operand);
- const error_union = try self.resolveInst(ty_op.operand);
- break :result try self.errUnionPayload(error_union, error_union_ty);
+
+ break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1938,17 +2129,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.getRefType(ty_op.ty);
+ const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
const abi_align = error_union_ty.abiAlignment(self.target.*);
- const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align));
+ const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
const err_off = errUnionErrorOffset(payload_ty, self.target.*);
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
- try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
+ try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
break :result MCValue{ .stack_offset = stack_offset };
};
@@ -1960,16 +2152,17 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.getRefType(ty_op.ty);
+ const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
const abi_align = error_union_ty.abiAlignment(self.target.*);
- const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align));
+ const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
const err_off = errUnionErrorOffset(payload_ty, self.target.*);
- try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), operand);
+ try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
break :result MCValue{ .stack_offset = stack_offset };
@@ -2008,7 +2201,6 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
- .dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_argument_offset => |off| {
break :result MCValue{ .stack_argument_offset = off + 4 };
@@ -2019,7 +2211,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
.memory => |addr| {
break :result MCValue{ .memory = addr + 4 };
},
- else => return self.fail("TODO implement slice_len for {}", .{mcv}),
+ else => unreachable, // invalid MCValue for a slice
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -2034,7 +2226,12 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - 4 };
},
- else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
+ else => {
+ const lhs_bind: ReadArg.Bind = .{ .mcv = mcv };
+ const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 4 } };
+
+ break :result try self.addSub(.add, lhs_bind, rhs_bind, Type.usize, Type.usize, null);
+ },
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -2049,91 +2246,96 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off };
},
- else => return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{mcv}),
+ else => {
+ if (self.reuseOperand(inst, ty_op.operand, 0, mcv)) {
+ break :result mcv;
+ } else {
+ break :result MCValue{ .register = try self.copyToTmpRegister(Type.usize, mcv) };
+ }
+ },
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-
- if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- const result: MCValue = result: {
- const slice_mcv = try self.resolveInst(bin_op.lhs);
-
- // TODO optimize for the case where the index is a constant,
- // i.e. index_mcv == .immediate
- const index_mcv = try self.resolveInst(bin_op.rhs);
- const index_is_register = index_mcv == .register;
-
- const slice_ty = self.air.typeOf(bin_op.lhs);
- const elem_ty = slice_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
-
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+fn ptrElemVal(
+ self: *Self,
+ ptr_bind: ReadArg.Bind,
+ index_bind: ReadArg.Bind,
+ ptr_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) !MCValue {
+ const elem_ty = ptr_ty.childType();
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- const index_lock: ?RegisterLock = if (index_is_register)
- self.register_manager.lockRegAssumeUnused(index_mcv.register)
- else
- null;
- defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
+ switch (elem_size) {
+ 1, 4 => {
+ var base_reg: Register = undefined;
+ var index_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- const base_mcv = slicePtr(slice_mcv);
+ const read_args = [_]ReadArg{
+ .{ .ty = ptr_ty, .bind = ptr_bind, .class = gp, .reg = &base_reg },
+ .{ .ty = Type.usize, .bind = index_bind, .class = gp, .reg = &index_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = elem_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{ 0, 1 },
+ } else null,
+ );
+
+ const tag: Mir.Inst.Tag = switch (elem_size) {
+ 1 => .ldrb,
+ 4 => .ldr,
+ else => unreachable,
+ };
+ const shift: u5 = switch (elem_size) {
+ 1 => 0,
+ 4 => 2,
+ else => unreachable,
+ };
- switch (elem_size) {
- 1, 4 => {
- const base_reg = switch (base_mcv) {
- .register => |r| r,
- else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv),
- };
- const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg);
- defer self.register_manager.unlockReg(base_reg_lock);
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_offset = .{
+ .rt = dest_reg,
+ .rn = base_reg,
+ .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) },
+ } },
+ });
- const dst_reg = try self.register_manager.allocReg(inst, gp);
- const dst_mcv = MCValue{ .register = dst_reg };
- const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_reg_lock);
+ return MCValue{ .register = dest_reg };
+ },
+ else => {
+ const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, Type.usize, null);
- const index_reg: Register = switch (index_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(Type.usize, index_mcv),
- };
- const index_reg_lock = self.register_manager.lockReg(index_reg);
- defer if (index_reg_lock) |lock| self.register_manager.unlockReg(lock);
+ const dest = try self.allocRegOrMem(elem_ty, true, maybe_inst);
+ try self.load(dest, addr, ptr_ty);
+ return dest;
+ },
+ }
+}
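+
+// For example, loading a u32 element with the base pointer in r1 and the
+// index in r2, the fast path above emits a single register-offset load
+// (registers are chosen by allocRegs; r0..r2 here are only illustrative):
+//
+//     ldr r0, [r1, r2, lsl #2]    @ address = base + index * 4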
- const tag: Mir.Inst.Tag = switch (elem_size) {
- 1 => .ldrb,
- 4 => .ldr,
- else => unreachable,
- };
- const shift: u5 = switch (elem_size) {
- 1 => 0,
- 4 => 2,
- else => unreachable,
- };
+fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const slice_ty = self.air.typeOf(bin_op.lhs);
+ const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = slice_ty.slicePtrFieldType(&buf);
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_offset = .{
- .rt = dst_reg,
- .rn = base_reg,
- .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) },
- } },
- });
+ const slice_mcv = try self.resolveInst(bin_op.lhs);
+ const base_mcv = slicePtr(slice_mcv);
- break :result dst_mcv;
- },
- else => {
- const dest = try self.allocRegOrMem(inst, true);
- const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
- try self.load(dest, addr, slice_ptr_field_type);
+ const base_bind: ReadArg.Bind = .{ .mcv = base_mcv };
+ const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
- break :result dest;
- },
- }
+ break :result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst);
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -2143,27 +2345,94 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const slice_mcv = try self.resolveInst(extra.lhs);
- const index_mcv = try self.resolveInst(extra.rhs);
const base_mcv = slicePtr(slice_mcv);
+ const base_bind: ReadArg.Bind = .{ .mcv = base_mcv };
+ const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };
+
const slice_ty = self.air.typeOf(extra.lhs);
+ const index_ty = self.air.typeOf(extra.rhs);
- const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null);
+ const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null);
break :result addr;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
+fn arrayElemVal(
+ self: *Self,
+ array_bind: ReadArg.Bind,
+ index_bind: ReadArg.Bind,
+ array_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ const elem_ty = array_ty.childType();
+
+ const mcv = try array_bind.resolveToMcv(self);
+ switch (mcv) {
+ .stack_offset,
+ .memory,
+ .stack_argument_offset,
+ => {
+ const ptr_to_mcv = switch (mcv) {
+ .stack_offset => |off| MCValue{ .ptr_stack_offset = off },
+ .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) },
+ .stack_argument_offset => |off| blk: {
+ const reg = try self.register_manager.allocReg(null, gp);
+
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .r_stack_offset = .{
+ .rt = reg,
+ .stack_offset = off,
+ } },
+ });
+
+ break :blk MCValue{ .register = reg };
+ },
+ else => unreachable,
+ };
+ const ptr_to_mcv_lock: ?RegisterLock = switch (ptr_to_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (ptr_to_mcv_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv };
+
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = elem_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+
+ return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst);
+ },
+ else => return self.fail("TODO implement array_elem_val for {}", .{mcv}),
+ }
+}
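+
+// For example, indexing a stack-allocated array takes the .stack_offset
+// branch above: the array's address is materialized as
+// MCValue{ .ptr_stack_offset = off }, the element type is wrapped in a
+// single_mut_pointer, and the access is then handled by ptrElemVal like any
+// other pointer indexing.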
+
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
+ const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+ const array_ty = self.air.typeOf(bin_op.lhs);
+
+ break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst);
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch});
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
+ const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
+ const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+
+ break :result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst);
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -2171,12 +2440,13 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ptr_mcv = try self.resolveInst(extra.lhs);
- const index_mcv = try self.resolveInst(extra.rhs);
+ const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs };
+ const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };
const ptr_ty = self.air.typeOf(extra.lhs);
+ const index_ty = self.air.typeOf(extra.rhs);
- const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null);
+ const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null);
break :result addr;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
@@ -2240,7 +2510,13 @@ fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
+fn reuseOperand(
+ self: *Self,
+ inst: Air.Inst.Index,
+ operand: Air.Inst.Ref,
+ op_index: Liveness.OperandInt,
+ mcv: MCValue,
+) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
@@ -2362,16 +2638,18 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst) and !is_volatile)
break :result MCValue.dead;
- const dst_mcv: MCValue = blk: {
- if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
+ const dest_mcv: MCValue = blk: {
+ const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4;
+ if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
break :blk ptr;
} else {
- break :blk try self.allocRegOrMem(inst, true);
+ break :blk try self.allocRegOrMem(elem_ty, true, inst);
}
};
- try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
- break :result dst_mcv;
+ try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand));
+
+ break :result dest_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -2498,26 +2776,10 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
},
else => {
- const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
- .immediate = struct_field_offset,
- });
- const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
- defer self.register_manager.unlockReg(offset_reg_lock);
-
- const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
- const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
- defer self.register_manager.unlockReg(addr_reg_lock);
+ const lhs_bind: ReadArg.Bind = .{ .mcv = mcv };
+ const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } };
- const dest = try self.binOp(
- .add,
- .{ .register = addr_reg },
- .{ .register = offset_reg },
- Type.usize,
- Type.usize,
- null,
- );
-
- break :result dest;
+ break :result try self.addSub(.add, lhs_bind, rhs_bind, Type.usize, Type.usize, null);
},
}
};
@@ -2532,6 +2794,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_ty = struct_ty.structFieldType(index);
switch (mcv) {
.dead, .unreach => unreachable,
@@ -2569,11 +2832,45 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
} else {
// Copy to new register
const dest_reg = try self.register_manager.allocReg(null, gp);
- try self.genSetReg(struct_ty.structFieldType(index), dest_reg, field);
+ try self.genSetReg(struct_field_ty, dest_reg, field);
break :result MCValue{ .register = dest_reg };
}
},
+ .register => {
+ var operand_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+
+ const read_args = [_]ReadArg{
+ .{ .ty = struct_ty, .bind = .{ .mcv = mcv }, .class = gp, .reg = &operand_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = struct_field_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ ReuseMetadata{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ },
+ );
+
+ const field_bit_offset = struct_field_offset * 8;
+ const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8;
+
+ _ = try self.addInst(.{
+ .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+ .data = .{ .rr_lsb_width = .{
+ .rd = dest_reg,
+ .rn = operand_reg,
+ .lsb = @intCast(u5, field_bit_offset),
+ .width = @intCast(u6, field_bit_size),
+ } },
+ });
+
+ break :result MCValue{ .register = dest_reg };
+ },
else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
}
};
@@ -2583,114 +2880,285 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFieldParentPtr", .{});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const field_ptr = try self.resolveInst(extra.field_ptr);
+ const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+ switch (field_ptr) {
+ .ptr_stack_offset => |off| {
+ break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
+ },
+ else => {
+ const lhs_bind: ReadArg.Bind = .{ .mcv = field_ptr };
+ const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } };
+
+ break :result try self.addSub(.sub, lhs_bind, rhs_bind, Type.usize, Type.usize, null);
+ },
+ }
+ };
+ return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none });
}
-/// Allocates a new register. If Inst in non-null, additionally tracks
-/// this register and the corresponding int and removes all previous
-/// tracking. Does not do the actual moving (that is handled by
-/// genSetReg).
-fn prepareNewRegForMoving(
- self: *Self,
- track_inst: ?Air.Inst.Index,
- register_class: RegisterManager.RegisterBitSet,
- mcv: MCValue,
-) !Register {
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- const reg = try self.register_manager.allocReg(track_inst, register_class);
+/// An argument to a Mir instruction which is read (and possibly also
+/// written to) by the respective instruction
+const ReadArg = struct {
+ ty: Type,
+ bind: Bind,
+ class: RegisterManager.RegisterBitSet,
+ reg: *Register,
- if (track_inst) |inst| {
- // Overwrite the MCValue associated with this inst
- branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+ const Bind = union(enum) {
+ inst: Air.Inst.Ref,
+ mcv: MCValue,
- // If the previous MCValue occupied some space we track, we
- // need to make sure it is marked as free now.
- switch (mcv) {
- .cpsr_flags => {
- assert(self.cpsr_flags_inst.? == inst);
- self.cpsr_flags_inst = null;
- },
- .register => |prev_reg| {
- assert(!self.register_manager.isRegFree(prev_reg));
- self.register_manager.freeReg(prev_reg);
- },
- else => {},
+ fn resolveToMcv(bind: Bind, function: *Self) InnerError!MCValue {
+ return switch (bind) {
+ .inst => |inst| try function.resolveInst(inst),
+ .mcv => |mcv| mcv,
+ };
}
- }
- return reg;
-}
+ fn resolveToImmediate(bind: Bind, function: *Self) InnerError!?u32 {
+ switch (bind) {
+ .inst => |inst| {
+ // TODO resolve independently of inst_table
+ const mcv = try function.resolveInst(inst);
+ switch (mcv) {
+ .immediate => |imm| return imm,
+ else => return null,
+ }
+ },
+ .mcv => |mcv| {
+ switch (mcv) {
+ .immediate => |imm| return imm,
+ else => return null,
+ }
+ },
+ }
+ }
+ };
+};
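+
+// A bind therefore names either an Air instruction whose value is to be read
+// or an already-known MCValue. Both forms appear throughout this file, e.g.:
+//
+//     const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
+//     const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 4 } };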
+
+/// An argument to a Mir instruction which is written to (but not read
+/// from) by the respective instruction
+const WriteArg = struct {
+ ty: Type,
+ bind: Bind,
+ class: RegisterManager.RegisterBitSet,
+ reg: *Register,
+
+ const Bind = union(enum) {
+ reg: Register,
+ none: void,
+ };
+};
+
+/// Holds all data necessary for enabling the potential reuse of
+/// operand registers as destinations
+const ReuseMetadata = struct {
+ corresponding_inst: Air.Inst.Index,
+
+ /// Maps every element index of read_args to the corresponding
+ /// index in the Air instruction
+ ///
+ /// When the order of read_args corresponds exactly to the order
+ /// of the inputs of the Air instruction, this would be e.g.
+ /// &.{ 0, 1 }. However, when the order is not the same or some
+ /// inputs to the Air instruction are omitted (e.g. when they can
+ /// be represented as immediates to the Mir instruction),
+ /// operand_mapping should reflect that fact.
+ operand_mapping: []const Liveness.OperandInt,
+};
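+
+// For example, binOpImmediateNew below passes &.{1} as the operand_mapping
+// when lhs and rhs were swapped: its single read_arg then corresponds to
+// operand 1 (the original rhs) of the Air instruction, so operand death is
+// checked against the right operand.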
-/// Don't call this function directly. Use binOp instead.
+/// Allocate a set of registers for use as arguments for a Mir
+/// instruction
///
-/// Calling this function signals an intention to generate a Mir
-/// instruction of the form
+/// If the Mir instruction these registers are allocated for
+/// corresponds exactly to a single Air instruction, populate
+/// reuse_metadata in order to enable potential reuse of an operand as
+/// the destination (provided that that operand dies in this
+/// instruction).
///
-/// op dest, lhs, rhs
+/// Reusing an operand register as destination is the only time two
+/// arguments may share the same register. In all other cases,
+/// allocRegs guarantees that a register will never be allocated to
+/// more than one argument.
///
-/// Asserts that generating an instruction of that form is possible.
-fn binOpRegister(
+/// Furthermore, allocRegs guarantees that all arguments which are
+/// already bound to registers before calling allocRegs will not
+/// change their register binding. This is done by locking these
+/// registers.
+fn allocRegs(
self: *Self,
- mir_tag: Mir.Inst.Tag,
- lhs: MCValue,
- rhs: MCValue,
- lhs_ty: Type,
- rhs_ty: Type,
- metadata: ?BinOpMetadata,
-) !MCValue {
- const lhs_is_register = lhs == .register;
- const rhs_is_register = rhs == .register;
+ read_args: []const ReadArg,
+ write_args: []const WriteArg,
+ reuse_metadata: ?ReuseMetadata,
+) InnerError!void {
+ // Air instructions have exactly one output
+ assert(!(reuse_metadata != null and write_args.len != 1)); // see note above
+
+ // The operand mapping is a 1:1 mapping of read args to their
+ // corresponding operand index in the Air instruction
+ assert(!(reuse_metadata != null and reuse_metadata.?.operand_mapping.len != read_args.len)); // see note above
+
+ const locks = try self.gpa.alloc(?RegisterLock, read_args.len + write_args.len);
+ defer self.gpa.free(locks);
+ const read_locks = locks[0..read_args.len];
+ const write_locks = locks[read_args.len..];
+
+ std.mem.set(?RegisterLock, locks, null);
+ defer for (locks) |lock| {
+ if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+ };
- const lhs_lock: ?RegisterLock = if (lhs_is_register)
- self.register_manager.lockReg(lhs.register)
- else
- null;
- defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+ // When we reuse a read_arg as a destination, the corresponding
+ // MCValue of the read_arg will be set to .dead. In that case, we
+ // skip allocating this read_arg.
+ var reused_read_arg: ?usize = null;
- const lhs_reg = if (lhs_is_register) lhs.register else blk: {
- const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
- break :inst Air.refToIndex(md.lhs).?;
- } else null;
+ // Lock all args which are already allocated to registers
+ for (read_args) |arg, i| {
+ const mcv = try arg.bind.resolveToMcv(self);
+ if (mcv == .register) {
+ read_locks[i] = self.register_manager.lockReg(mcv.register);
+ }
+ }
- break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
- };
- const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
- defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
+ for (write_args) |arg, i| {
+ if (arg.bind == .reg) {
+ write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
+ }
+ }
- const rhs_reg = if (rhs_is_register) rhs.register else blk: {
- const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
- break :inst Air.refToIndex(md.rhs).?;
- } else null;
+ // Allocate registers for all args which aren't allocated to
+ // registers yet
+ for (read_args) |arg, i| {
+ const mcv = try arg.bind.resolveToMcv(self);
+ if (mcv == .register) {
+ arg.reg.* = mcv.register;
+ } else {
+ const track_inst: ?Air.Inst.Index = switch (arg.bind) {
+ .inst => |inst| Air.refToIndex(inst).?,
+ else => null,
+ };
+ arg.reg.* = try self.register_manager.allocReg(track_inst, arg.class);
+ read_locks[i] = self.register_manager.lockReg(arg.reg.*);
+ }
+ }
- break :blk try self.prepareNewRegForMoving(track_inst, gp, rhs);
- };
- const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
- defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const dest_reg = switch (mir_tag) {
- .cmp => .r0, // cmp has no destination regardless
- else => if (metadata) |md| blk: {
- if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
- break :blk lhs_reg;
- } else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
- break :blk rhs_reg;
+ if (reuse_metadata != null) {
+ const inst = reuse_metadata.?.corresponding_inst;
+ const operand_mapping = reuse_metadata.?.operand_mapping;
+ const arg = write_args[0];
+ if (arg.bind == .reg) {
+ arg.reg.* = arg.bind.reg;
+ } else {
+ reuse_operand: for (read_args) |read_arg, i| {
+ if (read_arg.bind == .inst) {
+ const operand = read_arg.bind.inst;
+ const mcv = try self.resolveInst(operand);
+ if (mcv == .register and
+ std.meta.eql(arg.class, read_arg.class) and
+ self.reuseOperand(inst, operand, operand_mapping[i], mcv))
+ {
+ arg.reg.* = mcv.register;
+ write_locks[0] = null;
+ reused_read_arg = i;
+ break :reuse_operand;
+ }
+ }
} else {
- break :blk try self.register_manager.allocReg(md.inst, gp);
+ arg.reg.* = try self.register_manager.allocReg(inst, arg.class);
+ write_locks[0] = self.register_manager.lockReg(arg.reg.*);
}
- } else try self.register_manager.allocReg(null, gp),
- };
+ }
+ } else {
+ for (write_args) |arg, i| {
+ if (arg.bind == .reg) {
+ arg.reg.* = arg.bind.reg;
+ } else {
+ arg.reg.* = try self.register_manager.allocReg(null, arg.class);
+ write_locks[i] = self.register_manager.lockReg(arg.reg.*);
+ }
+ }
+ }
+
+ // For all read_args which need to be moved from non-register to
+ // register, perform the move
+ for (read_args) |arg, i| {
+ if (reused_read_arg) |j| {
+ // Check whether this read_arg was reused
+ if (i == j) continue;
+ }
+
+ const mcv = try arg.bind.resolveToMcv(self);
+ if (mcv != .register) {
+ if (arg.bind == .inst) {
+ const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+ const inst = Air.refToIndex(arg.bind.inst).?;
+
+ // Overwrite the MCValue associated with this inst
+ branch.inst_table.putAssumeCapacity(inst, .{ .register = arg.reg.* });
+
+ // If the previous MCValue occupied some space we track, we
+ // need to make sure it is marked as free now.
+ switch (mcv) {
+ .cpsr_flags => {
+ assert(self.cpsr_flags_inst.? == inst);
+ self.cpsr_flags_inst = null;
+ },
+ .register => |prev_reg| {
+ assert(!self.register_manager.isRegFree(prev_reg));
+ self.register_manager.freeReg(prev_reg);
+ },
+ else => {},
+ }
+ }
+
+ try self.genSetReg(arg.ty, arg.reg.*, mcv);
+ }
+ }
+}
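+
+// The typical call shape (see binOpRegisterNew below for the canonical use):
+// declare one Register per argument, describe each argument with a
+// ReadArg/WriteArg pointing at it, then call allocRegs, after which every
+// Register variable holds an allocated (and, where needed, locked) register:
+//
+//     var lhs_reg: Register = undefined;
+//     var dest_reg: Register = undefined;
+//     try self.allocRegs(
+//         &.{.{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }},
+//         &.{.{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }},
+//         null,
+//     );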
+
+/// Wrapper around allocRegs and addInst tailored for specific Mir
+/// instructions which are binary operations acting on two registers
+///
+/// Returns the destination register
+fn binOpRegisterNew(
+ self: *Self,
+ mir_tag: Mir.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) !MCValue {
+ var lhs_reg: Register = undefined;
+ var rhs_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
- if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
+ .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{ 0, 1 },
+ } else null,
+ );
const mir_data: Mir.Inst.Data = switch (mir_tag) {
.add,
.adds,
.sub,
.subs,
- .cmp,
.@"and",
.orr,
.eor,
@@ -2725,78 +3193,51 @@ fn binOpRegister(
return MCValue{ .register = dest_reg };
}
-/// Don't call this function directly. Use binOp instead.
-///
-/// Calling this function signals an intention to generate a Mir
-/// instruction of the form
-///
-/// op dest, lhs, #rhs_imm
+/// Wrapper around allocRegs and addInst tailored for specific Mir
+/// instructions which are binary operations acting on a register and
+/// an immediate
///
-/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to
-/// rhs and vice versa. This parameter is only used when maybe_inst !=
-/// null.
-///
-/// Asserts that generating an instruction of that form is possible.
-fn binOpImmediate(
+/// Returns the destination register
+fn binOpImmediateNew(
self: *Self,
mir_tag: Mir.Inst.Tag,
- lhs: MCValue,
- rhs: MCValue,
+ lhs_bind: ReadArg.Bind,
+ rhs_immediate: u32,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
- metadata: ?BinOpMetadata,
+ maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const lhs_is_register = lhs == .register;
+ var lhs_reg: Register = undefined;
+ var dest_reg: Register = undefined;
- const lhs_lock: ?RegisterLock = if (lhs_is_register)
- self.register_manager.lockReg(lhs.register)
- else
- null;
- defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const lhs_reg = if (lhs_is_register) lhs.register else blk: {
- const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
- break :inst Air.refToIndex(
- if (lhs_and_rhs_swapped) md.rhs else md.lhs,
- ).?;
- } else null;
-
- break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
};
- const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
- defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const dest_reg = switch (mir_tag) {
- .cmp => .r0, // cmp has no destination reg
- else => if (metadata) |md| blk: {
- if (lhs_is_register and self.reuseOperand(
- md.inst,
- if (lhs_and_rhs_swapped) md.rhs else md.lhs,
- if (lhs_and_rhs_swapped) 1 else 0,
- lhs,
- )) {
- break :blk lhs_reg;
- } else {
- break :blk try self.register_manager.allocReg(md.inst, gp);
- }
- } else try self.register_manager.allocReg(null, gp),
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
};
-
- if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+ const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0};
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = operand_mapping,
+ } else null,
+ );
const mir_data: Mir.Inst.Data = switch (mir_tag) {
.add,
.adds,
.sub,
.subs,
- .cmp,
.@"and",
.orr,
.eor,
=> .{ .rr_op = .{
.rd = dest_reg,
.rn = lhs_reg,
- .op = Instruction.Operand.fromU32(rhs.immediate).?,
+ .op = Instruction.Operand.fromU32(rhs_immediate).?,
} },
.lsl,
.asr,
@@ -2804,7 +3245,7 @@ fn binOpImmediate(
=> .{ .rr_shift = .{
.rd = dest_reg,
.rm = lhs_reg,
- .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs.immediate)),
+ .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)),
} },
else => unreachable,
};
@@ -2817,417 +3258,502 @@ fn binOpImmediate(
return MCValue{ .register = dest_reg };
}
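+
+// Note that callers gate the immediate path on Instruction.Operand.fromU32,
+// which succeeds only for constants expressible as an ARM "modified
+// immediate": an 8-bit value rotated right by an even amount. For example,
+// 0x104 (0x41 << 2) is encodable, while 0x101 is not and must fall back to
+// binOpRegisterNew.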
-const BinOpMetadata = struct {
- inst: Air.Inst.Index,
- lhs: Air.Inst.Ref,
- rhs: Air.Inst.Ref,
-};
-
-/// For all your binary operation needs, this function will generate
-/// the corresponding Mir instruction(s). Returns the location of the
-/// result.
-///
-/// If the binary operation itself happens to be an Air instruction,
-/// pass the corresponding index in the inst parameter. That helps
-/// this function do stuff like reusing operands.
-///
-/// This function does not do any lowering to Mir itself, but instead
-/// looks at the lhs and rhs and determines which kind of lowering
-/// would be best suitable and then delegates the lowering to other
-/// functions.
-fn binOp(
+fn addSub(
self: *Self,
tag: Air.Inst.Tag,
- lhs: MCValue,
- rhs: MCValue,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
lhs_ty: Type,
rhs_ty: Type,
- metadata: ?BinOpMetadata,
+ maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (tag) {
- .add,
- .sub,
- .cmp_eq,
- => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const mod = self.bin_file.options.module.?;
- assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- // Only say yes if the operation is
- // commutative, i.e. we can swap both of the
- // operands
- const lhs_immediate_ok = switch (tag) {
- .add => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null,
- .sub,
- .cmp_eq,
- => false,
- else => unreachable,
- };
- const rhs_immediate_ok = switch (tag) {
- .add,
- .sub,
- .cmp_eq,
- => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null,
- else => unreachable,
- };
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const lhs_immediate = try lhs_bind.resolveToImmediate(self);
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
+
+ // lhs_immediate_ok may only be true if the operation is
+ // commutative, i.e. if the two operands can be swapped
+ const lhs_immediate_ok = switch (tag) {
+ .add => if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false,
+ .sub => false,
+ else => unreachable,
+ };
+ const rhs_immediate_ok = switch (tag) {
+ .add,
+ .sub,
+ => if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false,
+ else => unreachable,
+ };
- const mir_tag: Mir.Inst.Tag = switch (tag) {
- .add => .add,
- .sub => .sub,
- .cmp_eq => .cmp,
- else => unreachable,
- };
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .add => .add,
+ .sub => .sub,
+ else => unreachable,
+ };
- if (rhs_immediate_ok) {
- return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
- } else if (lhs_immediate_ok) {
- // swap lhs and rhs
- return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
- } else {
- return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
- }
- } else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
- },
- else => unreachable,
- }
- },
- .mul => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const mod = self.bin_file.options.module.?;
- assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- // TODO add optimisations for multiplication
- // with immediates, for example a * 2 can be
- // lowered to a << 1
- return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata);
- } else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
- },
- else => unreachable,
- }
- },
- .div_float => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- else => unreachable,
- }
- },
- .div_trunc, .div_floor => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const mod = self.bin_file.options.module.?;
- assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- switch (int_info.signedness) {
- .signed => {
- return self.fail("TODO ARM signed integer division", .{});
- },
- .unsigned => {
- switch (rhs) {
- .immediate => |imm| {
- if (std.math.isPowerOfTwo(imm)) {
- const shift = MCValue{ .immediate = std.math.log2_int(u32, imm) };
- return try self.binOp(.shr, lhs, shift, lhs_ty, rhs_ty, metadata);
- } else {
- return self.fail("TODO ARM integer division by constants", .{});
- }
- },
- else => return self.fail("TODO ARM integer division", .{}),
- }
- },
- }
- } else {
- return self.fail("TODO ARM integer division for integers > u32/i32", .{});
- }
- },
- else => unreachable,
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, maybe_inst);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ return try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, maybe_inst);
+ } else {
+ return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
+ }
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
}
},
- .div_exact => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => return self.fail("TODO ARM div_exact", .{}),
- else => unreachable,
+ else => unreachable,
+ }
+}
+
+fn mul(
+ self: *Self,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ // TODO add optimisations for multiplication
+ // with immediates, for example a * 2 can be
+ // lowered to a << 1
+ return try self.binOpRegisterNew(.mul, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
}
},
- .rem => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const mod = self.bin_file.options.module.?;
- assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- switch (int_info.signedness) {
- .signed => {
- return self.fail("TODO ARM signed integer mod", .{});
- },
- .unsigned => {
- switch (rhs) {
- .immediate => |imm| {
- if (std.math.isPowerOfTwo(imm)) {
- const log2 = std.math.log2_int(u32, imm);
-
- const lhs_is_register = lhs == .register;
-
- const lhs_lock: ?RegisterLock = if (lhs_is_register)
- self.register_manager.lockReg(lhs.register)
- else
- null;
- defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const lhs_reg = if (lhs_is_register) lhs.register else blk: {
- const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
- break :inst Air.refToIndex(md.lhs).?;
- } else null;
-
- break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
- };
- const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
- defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
- const dest_reg = if (metadata) |md| blk: {
- if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
- break :blk lhs_reg;
- } else {
- break :blk try self.register_manager.allocReg(md.inst, gp);
- }
- } else try self.register_manager.allocReg(null, gp);
-
- if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
-
- try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2);
- return MCValue{ .register = dest_reg };
- } else {
- return self.fail("TODO ARM integer mod by constants", .{});
- }
- },
- else => return self.fail("TODO ARM integer mod", .{}),
- }
- },
+ else => unreachable,
+ }
+}
+
+fn divFloat(
+ self: *Self,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ _ = lhs_bind;
+ _ = rhs_bind;
+ _ = rhs_ty;
+ _ = maybe_inst;
+
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ else => unreachable,
+ }
+}
+
+fn div(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ _ = tag;
+
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ switch (int_info.signedness) {
+ .signed => {
+ return self.fail("TODO ARM signed integer division", .{});
+ },
+ .unsigned => {
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
+
+ if (rhs_immediate) |imm| {
+ if (std.math.isPowerOfTwo(imm)) {
+ const shift = std.math.log2_int(u32, imm);
+ return try self.binOpImmediateNew(.lsr, lhs_bind, shift, lhs_ty, false, maybe_inst);
+ } else {
+ return self.fail("TODO ARM integer division by constants", .{});
+ }
+ } else {
+ return self.fail("TODO ARM integer division", .{});
}
- } else {
- return self.fail("TODO ARM integer division for integers > u32/i32", .{});
- }
- },
- else => unreachable,
- }
- },
- .mod => {
- switch (lhs_ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => return self.fail("TODO ARM mod", .{}),
- else => unreachable,
+ },
+ }
+ } else {
+ return self.fail("TODO ARM integer division for integers > u32/i32", .{});
}
},
- .addwrap,
- .subwrap,
- .mulwrap,
- => {
- const base_tag: Air.Inst.Tag = switch (tag) {
- .addwrap => .add,
- .subwrap => .sub,
- .mulwrap => .mul,
- else => unreachable,
- };
+ else => unreachable,
+ }
+}
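+
+// For example, an unsigned division by a power of two such as x / 8 never
+// reaches an actual division: the path above rewrites it as a logical shift
+// right by log2(8):
+//
+//     lsr rd, rn, #3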
- // Generate an add/sub/mul
- const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
-
- // Truncate if necessary
- switch (lhs_ty.zigTypeTag()) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- const result_reg = result.register;
-
- if (int_info.bits < 32) {
- try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
- return result;
- } else return result;
- } else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
- },
- else => unreachable,
- }
- },
- .bit_and,
- .bit_or,
- .xor,
- => {
- switch (lhs_ty.zigTypeTag()) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const mod = self.bin_file.options.module.?;
- assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null;
- const rhs_immediate_ok = rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null;
-
- const mir_tag: Mir.Inst.Tag = switch (tag) {
- .bit_and => .@"and",
- .bit_or => .orr,
- .xor => .eor,
- else => unreachable,
- };
+fn divExact(
+ self: *Self,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ _ = lhs_bind;
+ _ = rhs_bind;
+ _ = rhs_ty;
+ _ = maybe_inst;
- if (rhs_immediate_ok) {
- return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
- } else if (lhs_immediate_ok) {
- // swap lhs and rhs
- return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => return self.fail("TODO ARM div_exact", .{}),
+ else => unreachable,
+ }
+}
+
+fn rem(
+ self: *Self,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ switch (int_info.signedness) {
+ .signed => {
+ return self.fail("TODO ARM signed integer mod", .{});
+ },
+ .unsigned => {
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
+
+ if (rhs_immediate) |imm| {
+ if (std.math.isPowerOfTwo(imm)) {
+ const log2 = std.math.log2_int(u32, imm);
+
+ var lhs_reg: Register = undefined;
+ var dest_reg: Register = undefined;
+
+ const read_args = [_]ReadArg{
+ .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
+ };
+ const write_args = [_]WriteArg{
+ .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &write_args,
+ if (maybe_inst) |inst| .{
+ .corresponding_inst = inst,
+ .operand_mapping = &.{0},
+ } else null,
+ );
+
+ try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2);
+
+ return MCValue{ .register = dest_reg };
+ } else {
+ return self.fail("TODO ARM integer mod by constants", .{});
+ }
} else {
- return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ return self.fail("TODO ARM integer mod", .{});
}
- } else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
- },
- else => unreachable,
+ },
+ }
+ } else {
+ return self.fail("TODO ARM integer division for integers > u32/i32", .{});
}
},
- .shl_exact,
- .shr_exact,
- => {
- switch (lhs_ty.zigTypeTag()) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- const rhs_immediate_ok = rhs == .immediate;
-
- const mir_tag: Mir.Inst.Tag = switch (tag) {
- .shl_exact => .lsl,
- .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) {
- .signed => Mir.Inst.Tag.asr,
- .unsigned => Mir.Inst.Tag.lsr,
- },
- else => unreachable,
- };
+ else => unreachable,
+ }
+}
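+
+// For example, x % 8 for unsigned x keeps only the low log2(8) = 3 bits, so
+// the power-of-two path above lowers it to a truncation of the source
+// register to 3 bits via truncRegister instead of computing a remainder.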
- if (rhs_immediate_ok) {
- return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
- } else {
- return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
- }
- } else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
- },
- else => unreachable,
+fn modulo(
+ self: *Self,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ _ = lhs_bind;
+ _ = rhs_bind;
+ _ = rhs_ty;
+ _ = maybe_inst;
+
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => return self.fail("TODO ARM mod", .{}),
+ else => unreachable,
+ }
+}
+
+fn wrappingArithmetic(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ const base_tag: Air.Inst.Tag = switch (tag) {
+ .addwrap => .add,
+ .subwrap => .sub,
+ .mulwrap => .mul,
+ else => unreachable,
+ };
+
+ // Generate an add/sub/mul
+ const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
+
+ // Truncate if necessary
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const result_reg = result.register;
+
+ if (int_info.bits < 32) {
+ try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
+ return result;
+ } else return result;
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
}
},
- .shl,
- .shr,
- => {
- const base_tag: Air.Inst.Tag = switch (tag) {
- .shl => .shl_exact,
- .shr => .shr_exact,
- else => unreachable,
- };
+ else => unreachable,
+ }
+}
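+
+// For example, an addwrap on two u8 operands first emits an ordinary 32-bit
+// add and then truncates the result register back to 8 bits, so
+// 0xFF +% 1 wraps to 0 as required.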
- // Generate a shl_exact/shr_exact
- const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+fn bitwise(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const lhs_immediate = try lhs_bind.resolveToImmediate(self);
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
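+ // fromU32 succeeds only when the value fits ARM's modified
+ // immediate encoding (an 8-bit value rotated right by an even
+ // amount); only such values can use the immediate form below.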
- // Truncate if necessary
- switch (tag) {
- .shr => return result,
- .shl => switch (lhs_ty.zigTypeTag()) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
- if (int_info.bits <= 32) {
- const result_reg = result.register;
+ const lhs_immediate_ok = if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false;
+ const rhs_immediate_ok = if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false;
- if (int_info.bits < 32) {
- try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
- return result;
- } else return result;
- } else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .bit_and => .@"and",
+ .bit_or => .orr,
+ .xor => .eor,
+ else => unreachable,
+ };
+
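+ // and/orr/eor are commutative, so when only the lhs is an
+ // encodable immediate we can swap the operands and still take
+ // the immediate path.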
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, maybe_inst);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ return try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, maybe_inst);
+ } else {
+ return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
+ }
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+}
+
+fn shiftExact(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .shl_exact => .lsl,
+ .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) {
+ .signed => Mir.Inst.Tag.asr,
+ .unsigned => Mir.Inst.Tag.lsr,
},
else => unreachable,
- },
- else => unreachable,
+ };
+
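+ // Unlike the logical ops above, an immediate shift amount is
+ // encoded in the instruction's 5-bit shift field, so no fromU32
+ // encodability check is needed here.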
+ if (rhs_immediate) |imm| {
+ return try self.binOpImmediateNew(mir_tag, lhs_bind, imm, lhs_ty, false, maybe_inst);
+ } else {
+ return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
+ }
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
}
},
- .bool_and,
- .bool_or,
- => {
- switch (lhs_ty.zigTypeTag()) {
- .Bool => {
- const lhs_immediate_ok = lhs == .immediate;
- const rhs_immediate_ok = rhs == .immediate;
+ else => unreachable,
+ }
+}
- const mir_tag: Mir.Inst.Tag = switch (tag) {
- .bool_and => .@"and",
- .bool_or => .orr,
- else => unreachable,
- };
+fn shiftNormal(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ const base_tag: Air.Inst.Tag = switch (tag) {
+ .shl => .shl_exact,
+ .shr => .shr_exact,
+ else => unreachable,
+ };
- if (rhs_immediate_ok) {
- return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
- } else if (lhs_immediate_ok) {
- // swap lhs and rhs
- return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
- } else {
- return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
- }
- },
+ // Generate a shl_exact/shr_exact
+ const result = try self.shiftExact(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
+
+ // Truncate if necessary
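+ // (only shl can move set bits past the integer's width; shr
+ // results always fit the operand type, so they are returned
+ // unmodified)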
+ switch (tag) {
+ .shr => return result,
+ .shl => switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const result_reg = result.register;
+
+ if (int_info.bits < 32) {
+ try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
+ return result;
+ } else return result;
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ },
+ else => unreachable,
+ }
+}
+
+fn booleanOp(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Bool => {
+ const lhs_immediate = try lhs_bind.resolveToImmediate(self);
+ const rhs_immediate = try rhs_bind.resolveToImmediate(self);
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .bool_and => .@"and",
+ .bool_or => .orr,
else => unreachable,
+ };
+
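+ // Booleans live in registers as 0 or 1, so bitwise and/orr
+ // implement bool_and/bool_or directly.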
+ if (rhs_immediate) |imm| {
+ return try self.binOpImmediateNew(mir_tag, lhs_bind, imm, lhs_ty, false, maybe_inst);
+ } else if (lhs_immediate) |imm| {
+ // swap lhs and rhs
+ return try self.binOpImmediateNew(mir_tag, rhs_bind, imm, rhs_ty, true, maybe_inst);
+ } else {
+ return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst);
}
},
- .ptr_add,
- .ptr_sub,
- => {
- switch (lhs_ty.zigTypeTag()) {
- .Pointer => {
- const ptr_ty = lhs_ty;
- const elem_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
- };
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ else => unreachable,
+ }
+}
- if (elem_size == 1) {
- const base_tag: Mir.Inst.Tag = switch (tag) {
- .ptr_add => .add,
- .ptr_sub => .sub,
- else => unreachable,
- };
+fn ptrArithmetic(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ lhs_bind: ReadArg.Bind,
+ rhs_bind: ReadArg.Bind,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ maybe_inst: ?Air.Inst.Index,
+) InnerError!MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Pointer => {
+ const mod = self.bin_file.options.module.?;
+ assert(rhs_ty.eql(Type.usize, mod));
- return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
- } else {
- // convert the offset into a byte offset by
- // multiplying it with elem_size
- const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
- const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
- return addr;
- }
- },
+ const ptr_ty = lhs_ty;
+ const elem_ty = switch (ptr_ty.ptrSize()) {
+ .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
+ else => ptr_ty.childType(),
+ };
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+
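+ // Pointer arithmetic operates in element units: an index of N
+ // advances the address by N * elem_size bytes, e.g. adding 3 to
+ // a pointer to u32 adds 12 to the address.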
+ const base_tag: Air.Inst.Tag = switch (tag) {
+ .ptr_add => .add,
+ .ptr_sub => .sub,
else => unreachable,
+ };
+
+ if (elem_size == 1) {
+ return try self.addSub(base_tag, lhs_bind, rhs_bind, Type.usize, Type.usize, maybe_inst);
+ } else {
+ // convert the offset into a byte offset by
+ // multiplying it by elem_size
+ const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } };
+
+ const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null);
+ const offset_bind = ReadArg.Bind{ .mcv = offset };
+
+ const addr = try self.addSub(base_tag, lhs_bind, offset_bind, Type.usize, Type.usize, null);
+ return addr;
}
},
else => unreachable,
@@ -3312,9 +3838,8 @@ fn genInlineMemcpy(
// mov count, #0
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = count,
- .rn = .r0,
.op = Instruction.Operand.imm(0, 0),
} },
});
@@ -3323,8 +3848,7 @@ fn genInlineMemcpy(
// cmp count, len
_ = try self.addInst(.{
.tag = .cmp,
- .data = .{ .rr_op = .{
- .rd = .r0,
+ .data = .{ .r_op_cmp = .{
.rn = count,
.op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none),
} },
@@ -3418,9 +3942,8 @@ fn genInlineMemsetCode(
// mov count, #0
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = count,
- .rn = .r0,
.op = Instruction.Operand.imm(0, 0),
} },
});
@@ -3429,8 +3952,7 @@ fn genInlineMemsetCode(
// cmp count, len
_ = try self.addInst(.{
.tag = .cmp,
- .data = .{ .rr_op = .{
- .rd = .r0,
+ .data = .{ .r_op_cmp = .{
.rn = count,
.op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none),
} },
@@ -3568,7 +4090,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
.register => |reg| blk: {
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const abi_align = ty.abiAlignment(self.target.*);
- const stack_offset = try self.allocMem(inst, abi_size, abi_align);
+ const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
break :blk MCValue{ .stack_offset = stack_offset };
@@ -3655,7 +4177,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ret_ty = fn_ty.fnReturnType();
const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
- const stack_offset = try self.allocMem(inst, ret_abi_size, ret_abi_align);
+ const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
@@ -3843,14 +4365,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
const abi_align = ret_ty.abiAlignment(self.target.*);
- // This is essentially allocMem without the
- // instruction tracking
- if (abi_align > self.stack_align)
- self.stack_align = abi_align;
- // TODO find a free slot instead of always appending
- const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
- self.next_stack_offset = offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ const offset = try self.allocMem(abi_size, abi_align, null);
const tmp_mcv = MCValue{ .stack_offset = offset };
try self.load(tmp_mcv, ptr, ptr_ty);
@@ -3871,32 +4386,16 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
- const operands: BinOpOperands = .{ .inst = .{
- .inst = inst,
- .lhs = bin_op.lhs,
- .rhs = bin_op.rhs,
- } };
- break :blk try self.cmp(operands, lhs_ty, op);
+ break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op);
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-const BinOpOperands = union(enum) {
- inst: struct {
- inst: Air.Inst.Index,
- lhs: Air.Inst.Ref,
- rhs: Air.Inst.Ref,
- },
- mcv: struct {
- lhs: MCValue,
- rhs: MCValue,
- },
-};
-
fn cmp(
self: *Self,
- operands: BinOpOperands,
+ lhs: ReadArg.Bind,
+ rhs: ReadArg.Bind,
lhs_ty: Type,
op: math.CompareOperator,
) !MCValue {
@@ -3926,22 +4425,47 @@ fn cmp(
if (int_info.bits <= 32) {
try self.spillCompareFlagsIfOccupied();
- switch (operands) {
- .inst => |inst_op| {
- const metadata: BinOpMetadata = .{
- .inst = inst_op.inst,
- .lhs = inst_op.lhs,
- .rhs = inst_op.rhs,
- };
- const lhs = try self.resolveInst(inst_op.lhs);
- const rhs = try self.resolveInst(inst_op.rhs);
+ var lhs_reg: Register = undefined;
+ var rhs_reg: Register = undefined;
- self.cpsr_flags_inst = inst_op.inst;
- _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, metadata);
- },
- .mcv => |mcv_op| {
- _ = try self.binOp(.cmp_eq, mcv_op.lhs, mcv_op.rhs, int_ty, int_ty, null);
- },
+ const rhs_immediate = try rhs.resolveToImmediate(self);
+ const rhs_immediate_ok = if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false;
+
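+ // If the rhs fits an ARM modified immediate, only the lhs needs
+ // a register; otherwise both operands are read into registers
+ // before emitting the cmp.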
+ if (rhs_immediate_ok) {
+ const read_args = [_]ReadArg{
+ .{ .ty = int_ty, .bind = lhs, .class = gp, .reg = &lhs_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &.{},
+ null, // we won't be able to reuse a register as there are no write_regs
+ );
+
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = lhs_reg,
+ .op = Instruction.Operand.fromU32(rhs_immediate.?).?,
+ } },
+ });
+ } else {
+ const read_args = [_]ReadArg{
+ .{ .ty = int_ty, .bind = lhs, .class = gp, .reg = &lhs_reg },
+ .{ .ty = int_ty, .bind = rhs, .class = gp, .reg = &rhs_reg },
+ };
+ try self.allocRegs(
+ &read_args,
+ &.{},
+ null, // we won't be able to reuse a register as there are no write_regs
+ );
+
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .r_op_cmp = .{
+ .rn = lhs_reg,
+ .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none),
+ } },
+ });
}
return switch (int_info.signedness) {
@@ -4020,9 +4544,7 @@ fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index {
// bne ...
_ = try self.addInst(.{
.tag = .cmp,
- .cond = .al,
- .data = .{ .rr_op = .{
- .rd = .r0,
+ .data = .{ .r_op_cmp = .{
.rn = reg,
.op = Instruction.Operand.imm(1, 0),
} },
@@ -4132,7 +4654,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (else_value == .dead)
continue;
// The instruction is only overridden in the else branch.
- var i: usize = self.branch_stack.items.len - 2;
+ var i: usize = self.branch_stack.items.len - 1;
while (true) {
i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| {
@@ -4159,7 +4681,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (then_value == .dead)
continue;
const parent_mcv = blk: {
- var i: usize = self.branch_stack.items.len - 2;
+ var i: usize = self.branch_stack.items.len - 1;
while (true) {
i -= 1;
if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| {
@@ -4185,75 +4707,39 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
-fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
- if (ty.isPtrLikeOptional()) {
- assert(ty.abiSize(self.target.*) == 4);
-
- const reg_mcv: MCValue = switch (operand) {
- .register => operand,
- else => .{ .register = try self.copyToTmpRegister(ty, operand) },
- };
-
- _ = try self.addInst(.{
- .tag = .cmp,
- .data = .{ .rr_op = .{
- .rd = undefined,
- .rn = reg_mcv.register,
- .op = Instruction.Operand.fromU32(0).?,
- } },
- });
+fn isNull(
+ self: *Self,
+ operand_bind: ReadArg.Bind,
+ operand_ty: Type,
+) !MCValue {
+ if (operand_ty.isPtrLikeOptional()) {
+ assert(operand_ty.abiSize(self.target.*) == 4);
- return MCValue{ .cpsr_flags = .eq };
+ const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
+ return self.cmp(operand_bind, imm_bind, Type.usize, .eq);
} else {
return self.fail("TODO implement non-pointer optionals", .{});
}
}
-fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
- const is_null_result = try self.isNull(ty, operand);
+fn isNonNull(
+ self: *Self,
+ operand_bind: ReadArg.Bind,
+ operand_ty: Type,
+) !MCValue {
+ const is_null_result = try self.isNull(operand_bind, operand_ty);
assert(is_null_result.cpsr_flags == .eq);
return MCValue{ .cpsr_flags = .ne };
}
-fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- const error_type = ty.errorUnionSet();
- const error_int_type = Type.initTag(.u16);
-
- if (error_type.errorSetIsEmpty()) {
- return MCValue{ .immediate = 0 }; // always false
- }
-
- const error_mcv = try self.errUnionErr(operand, ty);
- _ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null);
- return MCValue{ .cpsr_flags = .hi };
-}
-
-fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- const is_err_result = try self.isErr(ty, operand);
- switch (is_err_result) {
- .cpsr_flags => |cond| {
- assert(cond == .hi);
- return MCValue{ .cpsr_flags = cond.negate() };
- },
- .immediate => |imm| {
- assert(imm == 0);
- return MCValue{ .immediate = 1 };
- },
- else => unreachable,
- }
-}
-
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
-
- try self.spillCompareFlagsIfOccupied();
- self.cpsr_flags_inst = inst;
-
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNull(ty, operand);
+ const operand_bind: ReadArg.Bind = .{ .inst = un_op };
+ const operand_ty = self.air.typeOf(un_op);
+
+ break :result try self.isNull(operand_bind, operand_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4263,16 +4749,12 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.air.typeOf(un_op);
- const operand: MCValue = blk: {
- if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
- break :blk operand_ptr;
- } else {
- break :blk try self.allocRegOrMem(inst, true);
- }
- };
+ const elem_ty = ptr_ty.elemType();
+
+ const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
- break :result try self.isNull(ptr_ty.elemType(), operand);
+
+ break :result try self.isNull(.{ .mcv = operand }, elem_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4280,9 +4762,10 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNonNull(ty, operand);
+ const operand_bind: ReadArg.Bind = .{ .inst = un_op };
+ const operand_ty = self.air.typeOf(un_op);
+
+ break :result try self.isNonNull(operand_bind, operand_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4292,26 +4775,57 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.air.typeOf(un_op);
- const operand: MCValue = blk: {
- if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
- break :blk operand_ptr;
- } else {
- break :blk try self.allocRegOrMem(inst, true);
- }
- };
+ const elem_ty = ptr_ty.elemType();
+
+ const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
- break :result try self.isNonNull(ptr_ty.elemType(), operand);
+
+ break :result try self.isNonNull(.{ .mcv = operand }, elem_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
+fn isErr(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+) !MCValue {
+ const error_type = error_union_ty.errorUnionSet();
+
+ if (error_type.errorSetIsEmpty()) {
+ return MCValue{ .immediate = 0 }; // always false
+ }
+
+ const error_mcv = try self.errUnionErr(error_union_bind, error_union_ty, null);
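+ // Error codes are nonzero integers; a value greater than 0
+ // therefore means an error is present, and the unsigned compare
+ // yields the expected .hi condition flag.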
+ return try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .gt);
+}
+
+fn isNonErr(
+ self: *Self,
+ error_union_bind: ReadArg.Bind,
+ error_union_ty: Type,
+) !MCValue {
+ const is_err_result = try self.isErr(error_union_bind, error_union_ty);
+ switch (is_err_result) {
+ .cpsr_flags => |cond| {
+ assert(cond == .hi);
+ return MCValue{ .cpsr_flags = cond.negate() };
+ },
+ .immediate => |imm| {
+ assert(imm == 0);
+ return MCValue{ .immediate = 1 };
+ },
+ else => unreachable,
+ }
+}
+
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isErr(ty, operand);
+ const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
+ const error_union_ty = self.air.typeOf(un_op);
+
+ break :result try self.isErr(error_union_bind, error_union_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4321,16 +4835,12 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.air.typeOf(un_op);
- const operand: MCValue = blk: {
- if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
- break :blk operand_ptr;
- } else {
- break :blk try self.allocRegOrMem(inst, true);
- }
- };
+ const elem_ty = ptr_ty.elemType();
+
+ const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
- break :result try self.isErr(ptr_ty.elemType(), operand);
+
+ break :result try self.isErr(.{ .mcv = operand }, elem_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4338,9 +4848,10 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNonErr(ty, operand);
+ const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
+ const error_union_ty = self.air.typeOf(un_op);
+
+ break :result try self.isNonErr(error_union_bind, error_union_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4350,16 +4861,12 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.air.typeOf(un_op);
- const operand: MCValue = blk: {
- if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
- break :blk operand_ptr;
- } else {
- break :blk try self.allocRegOrMem(inst, true);
- }
- };
+ const elem_ty = ptr_ty.elemType();
+
+ const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
- break :result try self.isNonErr(ptr_ty.elemType(), operand);
+
+ break :result try self.isNonErr(.{ .mcv = operand }, elem_ty);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4456,14 +4963,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
defer self.gpa.free(branch_into_prong_relocs);
for (items) |item, idx| {
- const condition = try self.resolveInst(pl_op.operand);
- const item_mcv = try self.resolveInst(item);
-
- const operands: BinOpOperands = .{ .mcv = .{
- .lhs = condition,
- .rhs = item_mcv,
- } };
- const cmp_result = try self.cmp(operands, condition_ty, .neq);
+ const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -4579,7 +5079,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
.none, .dead, .unreach => unreachable,
.register, .stack_offset, .memory => operand_mcv,
.immediate, .stack_argument_offset, .cpsr_flags => blk: {
- const new_mcv = try self.allocRegOrMem(block, true);
+ const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block);
try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
break :blk new_mcv;
},
@@ -4832,9 +5332,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.register_v_flag => .vs,
else => unreachable,
},
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = cond_reg,
- .rn = .r0,
.op = Instruction.Operand.fromU32(1).?,
} },
});
@@ -4935,9 +5434,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// mov reg, 0
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = zero,
} },
});
@@ -4946,9 +5444,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
_ = try self.addInst(.{
.tag = .mov,
.cond = condition,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = one,
} },
});
@@ -4957,18 +5454,16 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (Instruction.Operand.fromU32(x)) |op| {
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = op,
} },
});
} else if (Instruction.Operand.fromU32(~x)) |op| {
_ = try self.addInst(.{
.tag = .mvn,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = op,
} },
});
@@ -4984,9 +5479,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} else {
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = Instruction.Operand.imm(@truncate(u8, x), 0),
} },
});
@@ -5028,9 +5522,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// orr reg, reg, #0xdd, 8
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = Instruction.Operand.imm(@truncate(u8, x), 0),
} },
});
@@ -5069,9 +5562,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// mov reg, src_reg
_ = try self.addInst(.{
.tag = .mov,
- .data = .{ .rr_op = .{
+ .data = .{ .r_op_mov = .{
.rd = reg,
- .rn = .r0,
.op = Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none),
} },
});
@@ -5307,7 +5799,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const array_ty = ptr_ty.childType();
const array_len = @intCast(u32, array_ty.arrayLen());
- const stack_offset = try self.allocMem(inst, 8, 8);
+ const stack_offset = try self.allocMem(8, 8, inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len });
break :result MCValue{ .stack_offset = stack_offset };
@@ -5461,15 +5953,24 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const result: MCValue = result: {
+ const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.air.typeOf(pl_op.operand);
- const error_union = try self.resolveInst(pl_op.operand);
- const is_err_result = try self.isErr(error_union_ty, error_union);
+ const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
+ const error_union_align = error_union_ty.abiAlignment(self.target.*);
+
+ // The error union will die in the body. However, we still need
+ // it after the body in order to extract its payload, so we
+ // create a copy of it beforehand.
+ const error_union_copy = try self.allocMem(error_union_size, error_union_align, null);
+ try self.genSetStack(error_union_ty, error_union_copy, try error_union_bind.resolveToMcv(self));
+
+ const is_err_result = try self.isErr(error_union_bind, error_union_ty);
const reloc = try self.condBr(is_err_result);
try self.genBody(body);
-
try self.performReloc(reloc);
- break :result try self.errUnionPayload(error_union, error_union_ty);
+
+ break :result try self.errUnionPayload(.{ .mcv = .{ .stack_offset = error_union_copy } }, error_union_ty, null);
};
return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
}
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index cf749792f0..188f5a5cfe 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -11,6 +11,7 @@ const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const Type = @import("../../type.zig").Type;
const ErrorMsg = Module.ErrorMsg;
+const Target = std.Target;
const assert = std.debug.assert;
const DW = std.dwarf;
const leb128 = std.leb;
@@ -93,6 +94,8 @@ pub fn emitMir(
.sub => try emit.mirDataProcessing(inst),
.subs => try emit.mirDataProcessing(inst),
+ .sub_sp_scratch_r0 => try emit.mirSubStackPointer(inst),
+
.asr => try emit.mirShift(inst),
.lsl => try emit.mirShift(inst),
.lsr => try emit.mirShift(inst),
@@ -190,6 +193,24 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
.dbg_epilogue_begin,
.dbg_prologue_end,
=> return 0,
+
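+ // This pseudo-instruction lowers to a variable number of real
+ // instructions, so its exact size must be known up front, e.g.
+ // so that branch offsets can be computed before emission.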
+ .sub_sp_scratch_r0 => {
+ const imm32 = emit.mir.instructions.items(.data)[inst].imm32;
+
+ if (imm32 == 0) {
+ return 0 * 4;
+ } else if (Instruction.Operand.fromU32(imm32) != null) {
+ // sub
+ return 1 * 4;
+ } else if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) {
+ // movw; movt; sub
+ return 3 * 4;
+ } else {
+ // mov; orr; orr; orr; sub
+ return 5 * 4;
+ }
+ },
+
else => return 4,
}
}
@@ -385,20 +406,75 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
fn mirDataProcessing(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
- const rr_op = emit.mir.instructions.items(.data)[inst].rr_op;
switch (tag) {
- .add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .cmp => try emit.writeInstruction(Instruction.cmp(cond, rr_op.rn, rr_op.op)),
- .eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .mov => try emit.writeInstruction(Instruction.mov(cond, rr_op.rd, rr_op.op)),
- .mvn => try emit.writeInstruction(Instruction.mvn(cond, rr_op.rd, rr_op.op)),
- .orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)),
- .subs => try emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .add,
+ .adds,
+ .@"and",
+ .eor,
+ .orr,
+ .rsb,
+ .sub,
+ .subs,
+ => {
+ const rr_op = emit.mir.instructions.items(.data)[inst].rr_op;
+ switch (tag) {
+ .add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ .subs => try emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)),
+ else => unreachable,
+ }
+ },
+ .cmp => {
+ const r_op_cmp = emit.mir.instructions.items(.data)[inst].r_op_cmp;
+ try emit.writeInstruction(Instruction.cmp(cond, r_op_cmp.rn, r_op_cmp.op));
+ },
+ .mov,
+ .mvn,
+ => {
+ const r_op_mov = emit.mir.instructions.items(.data)[inst].r_op_mov;
+ switch (tag) {
+ .mov => try emit.writeInstruction(Instruction.mov(cond, r_op_mov.rd, r_op_mov.op)),
+ .mvn => try emit.writeInstruction(Instruction.mvn(cond, r_op_mov.rd, r_op_mov.op)),
+ else => unreachable,
+ }
+ },
+ else => unreachable,
+ }
+}
+
+fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const cond = emit.mir.instructions.items(.cond)[inst];
+ const imm32 = emit.mir.instructions.items(.data)[inst].imm32;
+
+ switch (tag) {
+ .sub_sp_scratch_r0 => {
+ if (imm32 == 0) return;
+
+ const operand = Instruction.Operand.fromU32(imm32) orelse blk: {
+ const scratch: Register = .r0;
+
+ if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) {
+ try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32)));
+ try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16)));
+ } else {
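+ // Without movw/movt (pre-v7), materialize the immediate one
+ // byte at a time: Operand.imm(value, rot) rotates value right
+ // by 2*rot, so rot values 12, 8, and 4 place a byte at bits
+ // 8-15, 16-23, and 24-31 respectively.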
+ try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0)));
+ try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12)));
+ try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8)));
+ try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4)));
+ }
+
+ break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none);
+ };
+
+ try emit.writeInstruction(Instruction.sub(cond, .sp, .sp, operand));
+ },
else => unreachable,
}
}
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index d5da7e5d4e..38cf4da3fd 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -111,6 +111,11 @@ pub const Inst = struct {
strh,
/// Subtract
sub,
+ /// Pseudo-instruction: Subtract 32-bit immediate from the stack pointer
+ ///
+ /// r0 can be used by Emit as a scratch register for loading
+ /// the immediate
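+ ///
+ /// Emitted as 0, 1, 3, or 5 machine instructions depending on
+ /// whether the immediate is zero, directly encodable, or must
+ /// first be materialized (movw/movt on v7, mov plus three orrs
+ /// otherwise)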
+ sub_sp_scratch_r0,
/// Subtract, update condition flags
subs,
/// Supervisor Call
@@ -144,6 +149,10 @@ pub const Inst = struct {
///
/// Used by e.g. svc
imm24: u24,
+ /// A 32-bit immediate value.
+ ///
+ /// Used by e.g. sub_sp_scratch_r0
+ imm32: u32,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
@@ -166,6 +175,20 @@ pub const Inst = struct {
rd: Register,
imm16: u16,
},
+ /// A register and an operand
+ ///
+ /// Used by mov and mvn
+ r_op_mov: struct {
+ rd: Register,
+ op: bits.Instruction.Operand,
+ },
+ /// A register and an operand
+ ///
+ /// Used by cmp
+ r_op_cmp: struct {
+ rn: Register,
+ op: bits.Instruction.Operand,
+ },
/// Two registers and a shift amount
///
/// Used by e.g. lsl
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 25e8695e82..05d7f2b73b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -128,15 +128,11 @@ pub const MCValue = union(enum) {
/// The value is in memory at a hard-coded address.
/// If the type is a pointer, it means the pointer address is at this memory location.
memory: u64,
- /// The value is in memory referenced indirectly via a GOT entry index.
- /// If the type is a pointer, it means the pointer is referenced indirectly via GOT.
- /// When lowered, linker will emit a relocation of type X86_64_RELOC_GOT.
- got_load: u32,
- imports_load: u32,
- /// The value is in memory referenced directly via symbol index.
- /// If the type is a pointer, it means the pointer is referenced directly via symbol index.
- /// When lowered, linker will emit a relocation of type X86_64_RELOC_SIGNED.
- direct_load: u32,
+ /// The value is in memory but requires a linker relocation fixup:
+ /// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
+ /// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
+ /// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
+ linker_load: struct { @"type": enum { got, direct, import }, sym_index: u32 },
/// The value is one of the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
stack_offset: i32,
@@ -150,9 +146,7 @@ pub const MCValue = union(enum) {
.memory,
.stack_offset,
.ptr_stack_offset,
- .direct_load,
- .got_load,
- .imports_load,
+ .linker_load,
=> true,
else => false,
};
@@ -165,26 +159,6 @@ pub const MCValue = union(enum) {
};
}
- fn isMutable(mcv: MCValue) bool {
- return switch (mcv) {
- .none => unreachable,
- .unreach => unreachable,
- .dead => unreachable,
-
- .immediate,
- .memory,
- .eflags,
- .ptr_stack_offset,
- .undef,
- .register_overflow,
- => false,
-
- .register,
- .stack_offset,
- => true,
- };
- }
-
fn isRegister(mcv: MCValue) bool {
return switch (mcv) {
.register => true,
@@ -2307,11 +2281,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .imm = @bitCast(u32, -off) },
});
},
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
},
else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
@@ -2652,11 +2622,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}),
}
},
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
const reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
},
@@ -2691,10 +2657,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue) InnerError!void {
switch (ptr) {
- .got_load,
- .direct_load,
- .imports_load,
- => |sym_index| {
+ .linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
@@ -2702,11 +2665,10 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
fn_owner_decl.link.macho.sym_index
else
fn_owner_decl.link.coff.sym_index;
- const flags: u2 = switch (ptr) {
- .got_load => 0b00,
- .direct_load => 0b01,
- .imports_load => 0b10,
- else => unreachable,
+ const flags: u2 = switch (load_struct.@"type") {
+ .got => 0b00,
+ .direct => 0b01,
+ .import => 0b10,
};
_ = try self.addInst(.{
.tag = .lea_pic,
@@ -2717,7 +2679,7 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
.data = .{
.relocation = .{
.atom_index = atom_index,
- .sym_index = sym_index,
+ .sym_index = load_struct.sym_index,
},
},
});
@@ -2801,9 +2763,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.register => |src_reg| {
try self.genInlineMemcpyRegisterRegister(value_ty, reg, src_reg, 0);
},
- .got_load,
- .direct_load,
- .imports_load,
+ .linker_load,
.memory,
.stack_offset,
=> {
@@ -2822,11 +2782,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
},
}
},
- .got_load,
- .direct_load,
- .imports_load,
- .memory,
- => {
+ .linker_load, .memory => {
const value_lock: ?RegisterLock = switch (value) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
@@ -2894,11 +2850,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.register => {
return self.store(new_ptr, value, ptr_ty, value_ty);
},
- .got_load,
- .direct_load,
- .imports_load,
- .memory,
- => {
+ .linker_load, .memory => {
if (abi_size <= 8) {
const tmp_reg = try self.register_manager.allocReg(null, gp);
const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
@@ -3606,9 +3558,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
});
},
.memory,
- .got_load,
- .direct_load,
- .imports_load,
+ .linker_load,
.eflags,
=> {
assert(abi_size <= 8);
@@ -3694,10 +3644,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
=> {
return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
},
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .linker_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
},
.eflags => {
@@ -3708,10 +3655,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.memory => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{});
},
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .linker_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{});
},
}
@@ -3779,10 +3723,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.memory => {
return self.fail("TODO implement x86 multiply source memory", .{});
},
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .linker_load => {
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
},
.eflags => {
@@ -3826,10 +3767,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.memory, .stack_offset => {
return self.fail("TODO implement x86 multiply source memory", .{});
},
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .linker_load => {
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
},
.eflags => {
@@ -3840,10 +3778,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.memory => {
return self.fail("TODO implement x86 multiply destination memory", .{});
},
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .linker_load => {
return self.fail("TODO implement x86 multiply destination symbol at index in linker", .{});
},
}
@@ -4006,9 +3941,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.unreach => unreachable,
.dead => unreachable,
.memory => unreachable,
- .got_load => unreachable,
- .direct_load => unreachable,
- .imports_load => unreachable,
+ .linker_load => unreachable,
.eflags => unreachable,
.register_overflow => unreachable,
}
@@ -4066,7 +3999,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.usize), .rax, .{
- .got_load = fn_owner_decl.link.coff.sym_index,
+ .linker_load = .{
+ .@"type" = .got,
+ .sym_index = fn_owner_decl.link.coff.sym_index,
+ },
});
_ = try self.addInst(.{
.tag = .call,
@@ -4087,7 +4023,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
}
const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.usize), .rax, .{
- .imports_load = sym_index,
+ .linker_load = .{
+ .@"type" = .import,
+ .sym_index = sym_index,
+ },
});
_ = try self.addInst(.{
.tag = .call,
@@ -4119,7 +4058,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const sym_index = fn_owner_decl.link.macho.sym_index;
- try self.genSetReg(Type.initTag(.usize), .rax, .{ .got_load = sym_index });
+ try self.genSetReg(Type.initTag(.usize), .rax, .{
+ .linker_load = .{
+ .@"type" = .got,
+ .sym_index = sym_index,
+ },
+ });
// callq *%rax
_ = try self.addInst(.{
.tag = .call,
@@ -4505,11 +4449,7 @@ fn genVarDbgInfo(
leb128.writeILEB128(dbg_info.writer(), -off) catch unreachable;
dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
},
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
const ptr_width = @intCast(u8, @divExact(self.target.cpu.arch.ptrBitWidth(), 8));
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
@@ -4540,10 +4480,11 @@ fn genVarDbgInfo(
try dbg_info.append(DW.OP.deref);
}
switch (mcv) {
- .got_load,
- .direct_load,
- .imports_load,
- => |index| try dw.addExprlocReloc(index, offset, is_ptr),
+ .linker_load => |load_struct| try dw.addExprlocReloc(
+ load_struct.sym_index,
+ offset,
+ is_ptr,
+ ),
else => {},
}
},
@@ -5587,11 +5528,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
else => return self.fail("TODO implement inputs on stack for {} with abi size > 8", .{mcv}),
}
},
- .memory,
- .direct_load,
- .got_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
@@ -5835,11 +5772,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
},
}
},
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts);
@@ -5959,11 +5892,7 @@ fn genInlineMemcpy(
const tmp_reg = regs[4].to8();
switch (dst_ptr) {
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
try self.loadMemPtrIntoRegister(dst_addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
@@ -5992,11 +5921,7 @@ fn genInlineMemcpy(
}
switch (src_ptr) {
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
try self.loadMemPtrIntoRegister(src_addr_reg, Type.usize, src_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
@@ -6120,11 +6045,7 @@ fn genInlineMemset(
const index_reg = regs[1].to64();
switch (dst_ptr) {
- .memory,
- .got_load,
- .direct_load,
- .imports_load,
- => {
+ .memory, .linker_load => {
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
@@ -6356,10 +6277,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = undefined,
});
},
- .direct_load,
- .got_load,
- .imports_load,
- => {
+ .linker_load => {
switch (ty.zigTypeTag()) {
.Float => {
const base_reg = try self.register_manager.allocReg(null, gp);
@@ -6753,11 +6671,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
// TODO Is this the only condition for pointer dereference for memcpy?
const src: MCValue = blk: {
switch (src_ptr) {
- .got_load,
- .direct_load,
- .imports_load,
- .memory,
- => {
+ .linker_load, .memory => {
const reg = try self.register_manager.allocReg(null, gp);
try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr);
_ = try self.addInst(.{
@@ -6997,10 +6911,16 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
assert(decl.link.macho.sym_index != 0);
- return MCValue{ .got_load = decl.link.macho.sym_index };
+ return MCValue{ .linker_load = .{
+ .@"type" = .got,
+ .sym_index = decl.link.macho.sym_index,
+ } };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
assert(decl.link.coff.sym_index != 0);
- return MCValue{ .got_load = decl.link.coff.sym_index };
+ return MCValue{ .linker_load = .{
+ .@"type" = .got,
+ .sym_index = decl.link.coff.sym_index,
+ } };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
@@ -7019,9 +6939,15 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
return MCValue{ .memory = vaddr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .direct_load = local_sym_index };
+ return MCValue{ .linker_load = .{
+ .@"type" = .direct,
+ .sym_index = local_sym_index,
+ } };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .direct_load = local_sym_index };
+ return MCValue{ .linker_load = .{
+ .@"type" = .direct,
+ .sym_index = local_sym_index,
+ } };
} else if (self.bin_file.cast(link.File.Plan9)) |_| {
return self.fail("TODO lower unnamed const in Plan9", .{});
} else {
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 45e58be972..e99f6ff4f5 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1021,10 +1021,14 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.@"type" = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
- 0b10 => .imports,
+ 0b10 => .import,
+ else => unreachable,
+ },
+ .target = switch (ops.flags) {
+ 0b00, 0b01 => .{ .sym_index = relocation.sym_index, .file = null },
+ 0b10 => coff_file.getGlobalByIndex(relocation.sym_index),
else => unreachable,
},
- .target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
@@ -1142,12 +1146,10 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
+ const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
- .target = .{
- .sym_index = relocation.sym_index,
- .file = null,
- },
+ .target = target,
.addend = 0,
.subtractor = null,
.pcrel = true,
@@ -1157,16 +1159,17 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
+ const target = coff_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(coff_file, .{
.@"type" = .direct,
- .target = .{ .sym_index = relocation.sym_index, .file = null },
+ .target = target,
.offset = offset,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else {
- return emit.fail("TODO implement call_extern for linking backends different than MachO", .{});
+ return emit.fail("TODO implement call_extern for linking backends other than MachO and COFF", .{});
}
}
diff --git a/src/autodoc/render_source.zig b/src/autodoc/render_source.zig
index ceba230276..aa9eca7e95 100644
--- a/src/autodoc/render_source.zig
+++ b/src/autodoc/render_source.zig
@@ -137,7 +137,7 @@ pub fn genHtml(
);
const source = try src.getSource(allocator);
- try tokenizeAndPrintRaw(allocator, out, source.bytes);
+ try tokenizeAndPrintRaw(out, source.bytes);
try out.writeAll(
\\</body>
\\</html>
@@ -150,13 +150,9 @@ const end_line = "</span>\n";
var line_counter: usize = 1;
pub fn tokenizeAndPrintRaw(
- allocator: Allocator,
out: anytype,
- raw_src: [:0]const u8,
+ src: [:0]const u8,
) !void {
- const src = try allocator.dupeZ(u8, raw_src);
- defer allocator.free(src);
-
line_counter = 1;
try out.print("<pre><code>" ++ start_line, .{line_counter});
diff --git a/src/glibc.zig b/src/glibc.zig
index 4e33867169..3dd7565e96 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -719,17 +719,16 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
.lt => continue,
.gt => {
// TODO Expose via compile error mechanism instead of log.
- log.err("invalid target glibc version: {}", .{target_version});
+ log.warn("invalid target glibc version: {}", .{target_version});
return error.InvalidTargetGLibCVersion;
},
}
- } else {
+ } else blk: {
const latest_index = metadata.all_versions.len - 1;
- // TODO Expose via compile error mechanism instead of log.
- log.err("zig does not yet provide glibc version {}, the max provided version is {}", .{
+ log.warn("zig cannot build new glibc version {}; providing instead {}", .{
target_version, metadata.all_versions[latest_index],
});
- return error.InvalidTargetGLibCVersion;
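+ // Fall back to the newest glibc version zig knows how to build.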
+ break :blk latest_index;
};
{
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 49263df225..013a0c0475 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -127,7 +127,7 @@ pub const Reloc = struct {
@"type": enum {
got,
direct,
- imports,
+ import,
},
target: SymbolWithLoc,
offset: u32,
@@ -141,7 +141,7 @@ pub const Reloc = struct {
switch (self.@"type") {
.got => return coff_file.getGotAtomForSymbol(self.target),
.direct => return coff_file.getAtomForSymbol(self.target),
- .imports => return coff_file.getImportAtomForSymbol(self.target),
+ .import => return coff_file.getImportAtomForSymbol(self.target),
}
}
};
@@ -1423,23 +1423,22 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
const sym = self.getSymbol(current);
const sym_name = self.getSymbolName(current);
- const global_index = self.resolver.get(sym_name) orelse {
- const name = try gpa.dupe(u8, sym_name);
- const global_index = try self.allocateGlobal();
- self.globals.items[global_index] = current;
- try self.resolver.putNoClobber(gpa, name, global_index);
+ const gop = try self.getOrPutGlobalPtr(sym_name);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = current;
if (sym.section_number == .UNDEFINED) {
- try self.unresolved.putNoClobber(gpa, global_index, false);
+ try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(sym_name).?, false);
}
return;
- };
+ }
log.debug("TODO finish resolveGlobalSymbols implementation", .{});
if (sym.section_number == .UNDEFINED) return;
- _ = self.unresolved.swapRemove(global_index);
- self.globals.items[global_index] = current;
+ _ = self.unresolved.swapRemove(self.getGlobalIndex(sym_name).?);
+
+ gop.value_ptr.* = current;
}
pub fn flush(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void {
@@ -1544,25 +1543,26 @@ pub fn getDeclVAddr(
}
pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
- if (self.resolver.get(name)) |global_index| {
- return self.globals.items[global_index].sym_index;
+ const gop = try self.getOrPutGlobalPtr(name);
+ const global_index = self.getGlobalIndex(name).?;
+
+ if (gop.found_existing) {
+ return global_index;
}
- const gpa = self.base.allocator;
const sym_index = try self.allocateSymbol();
- const global_index = try self.allocateGlobal();
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- self.globals.items[global_index] = sym_loc;
+ gop.value_ptr.* = sym_loc;
+ const gpa = self.base.allocator;
const sym_name = try gpa.dupe(u8, name);
const sym = self.getSymbolPtr(sym_loc);
try self.setSymbolName(sym, sym_name);
sym.storage_class = .EXTERNAL;
- try self.resolver.putNoClobber(gpa, sym_name, global_index);
try self.unresolved.putNoClobber(gpa, global_index, true);
- return sym_index;
+ return global_index;
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
@@ -2061,6 +2061,49 @@ pub fn getSymbolName(self: *const Coff, sym_loc: SymbolWithLoc) []const u8 {
return self.strtab.get(offset).?;
}
+/// Returns pointer to the global entry for `name` if one exists.
+pub fn getGlobalPtr(self: *Coff, name: []const u8) ?*SymbolWithLoc {
+ const global_index = self.resolver.get(name) orelse return null;
+ return &self.globals.items[global_index];
+}
+
+/// Returns the global entry for `name` if one exists.
+pub fn getGlobal(self: *const Coff, name: []const u8) ?SymbolWithLoc {
+ const global_index = self.resolver.get(name) orelse return null;
+ return self.globals.items[global_index];
+}
+
+/// Returns the index of the global entry for `name` if one exists.
+pub fn getGlobalIndex(self: *const Coff, name: []const u8) ?u32 {
+ return self.resolver.get(name);
+}
+
+/// Returns global entry at `index`.
+pub fn getGlobalByIndex(self: *const Coff, index: u32) SymbolWithLoc {
+ assert(index < self.globals.items.len);
+ return self.globals.items[index];
+}
+
+const GetOrPutGlobalPtrResult = struct {
+ found_existing: bool,
+ value_ptr: *SymbolWithLoc,
+};
+
+/// Returns a pointer to the global entry for `name` if one exists.
+/// Otherwise puts a new global entry for `name` and returns a
+/// pointer to it.
+pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult {
+ if (self.getGlobalPtr(name)) |ptr| {
+ return GetOrPutGlobalPtrResult{ .found_existing = true, .value_ptr = ptr };
+ }
+ const gpa = self.base.allocator;
+ const global_index = try self.allocateGlobal();
+ const global_name = try gpa.dupe(u8, name);
+ _ = try self.resolver.put(gpa, global_name, global_index);
+ const ptr = &self.globals.items[global_index];
+ return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor.
/// Returns null on failure.
pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig
index ffd8fe45e6..39e04b2641 100644
--- a/src/link/Coff/Atom.zig
+++ b/src/link/Coff/Atom.zig
@@ -111,13 +111,3 @@ pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
}
try gop.value_ptr.append(gpa, offset);
}
-
-pub fn addBinding(self: *Atom, coff_file: *Coff, target: SymbolWithLoc) !void {
- const gpa = coff_file.base.allocator;
- log.debug(" (adding binding to target %{d} in %{d})", .{ target.sym_index, self.sym_index });
- const gop = try coff_file.bindings.getOrPut(gpa, self);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(gpa, target);
-}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 429bf64eb2..a6720f8dd3 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -131,17 +131,12 @@ la_symbol_ptr_section_index: ?u8 = null,
data_section_index: ?u8 = null,
locals: std.ArrayListUnmanaged(macho.nlist_64) = .{},
-globals: std.StringArrayHashMapUnmanaged(SymbolWithLoc) = .{},
-// FIXME Jakub
-// TODO storing index into globals might be dangerous if we delete a global
-// while not having everything resolved. Actually, perhaps `unresolved`
-// should not be stored at the global scope? Is this possible?
-// Otherwise, audit if this can be a problem.
-// An alternative, which I still need to investigate for perf reasons is to
-// store all global names in an adapted with context strtab.
+globals: std.ArrayListUnmanaged(SymbolWithLoc) = .{},
+resolver: std.StringHashMapUnmanaged(u32) = .{},
unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .{},
locals_free_list: std.ArrayListUnmanaged(u32) = .{},
+globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
dyld_private_atom: ?*Atom = null,
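In short, the single string-keyed map is split into an index-stable array plus a name resolver. The lookup chain after this change, sketched (not code from the patch):

    // name -> u32 (resolver) -> SymbolWithLoc (globals) -> macho.nlist_64
    if (self.resolver.get("_main")) |global_index| {
        const global = self.globals.items[global_index];
        const sym = self.getSymbol(global); // dispatches on global.file
    }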
@@ -1917,7 +1912,7 @@ fn allocateSpecialSymbols(self: *MachO) !void {
"___dso_handle",
"__mh_execute_header",
}) |name| {
- const global = self.globals.get(name) orelse continue;
+ const global = self.getGlobal(name) orelse continue;
if (global.file != null) continue;
const sym = self.getSymbolPtr(global);
const seg = self.segments.items[self.text_segment_cmd_index.?];
@@ -2048,16 +2043,11 @@ fn writeAtomsIncremental(self: *MachO) !void {
pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const gpa = self.base.allocator;
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
-
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
+
try atom.relocs.append(gpa, .{
.offset = 0,
.target = target,
@@ -2074,7 +2064,7 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) {
- const global = self.globals.get(self.getSymbolName(target)).?;
+ const global = self.getGlobal(self.getSymbolName(target)).?;
try atom.bindings.append(gpa, .{
.target = global,
.offset = 0,
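The same mechanical replacement repeats in the atom-creation helpers that follow (createTlvPtrAtom, createDyldPrivateAtom, the stub helpers, and so on); the distilled pattern, with `size` and `alignment` standing in for the per-helper values:

    const sym_index = try self.allocateSymbol(); // may reuse a locals_free_list slot
    const atom = try MachO.createEmptyAtom(gpa, sym_index, size, alignment);
    atom.getSymbolPtr(self).n_type = macho.N_SECT;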
@@ -2093,20 +2083,15 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
pub fn createTlvPtrAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const gpa = self.base.allocator;
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
-
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
+
const target_sym = self.getSymbol(target);
assert(target_sym.undf());
- const global = self.globals.get(self.getSymbolName(target)).?;
+ const global = self.getGlobal(self.getSymbolName(target)).?;
try atom.bindings.append(gpa, .{
.target = global,
.offset = 0,
@@ -2130,15 +2115,10 @@ fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_private_atom != null) return;
const gpa = self.base.allocator;
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
self.dyld_private_atom = atom;
try self.allocateAtomCommon(atom, self.data_section_index.?);
@@ -2163,15 +2143,11 @@ fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 2,
else => unreachable,
};
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, size, alignment);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
+
const dyld_private_sym_index = self.dyld_private_atom.?.sym_index;
switch (arch) {
.x86_64 => {
@@ -2288,15 +2264,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 2,
else => unreachable,
};
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, stub_size, alignment);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
+
try atom.relocs.ensureTotalCapacity(gpa, 1);
switch (arch) {
@@ -2352,15 +2324,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
const gpa = self.base.allocator;
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
+
try atom.relocs.append(gpa, .{
.offset = 0,
.target = .{ .sym_index = stub_sym_index, .file = null },
@@ -2376,7 +2344,7 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
});
try atom.rebases.append(gpa, 0);
- const global = self.globals.get(self.getSymbolName(target)).?;
+ const global = self.getGlobal(self.getSymbolName(target)).?;
try atom.lazy_bindings.append(gpa, .{
.target = global,
.offset = 0,
@@ -2403,15 +2371,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = 0,
- .n_type = macho.N_SECT,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
+ const sym_index = try self.allocateSymbol();
const atom = try MachO.createEmptyAtom(gpa, sym_index, stub_size, alignment);
+ const sym = atom.getSymbolPtr(self);
+ sym.n_type = macho.N_SECT;
+
switch (arch) {
.x86_64 => {
// jmp
@@ -2472,7 +2436,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
fn createTentativeDefAtoms(self: *MachO) !void {
const gpa = self.base.allocator;
- for (self.globals.values()) |global| {
+ for (self.globals.items) |global| {
const sym = self.getSymbolPtr(global);
if (!sym.tentative()) continue;
@@ -2516,51 +2480,44 @@ fn createTentativeDefAtoms(self: *MachO) !void {
fn createMhExecuteHeaderSymbol(self: *MachO) !void {
if (self.base.options.output_mode != .Exe) return;
- if (self.globals.get("__mh_execute_header")) |global| {
+ if (self.getGlobal("__mh_execute_header")) |global| {
const sym = self.getSymbol(global);
if (!sym.undf() and !(sym.pext() or sym.weakDef())) return;
}
const gpa = self.base.allocator;
- const n_strx = try self.strtab.insert(gpa, "__mh_execute_header");
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = n_strx,
+ const sym_index = try self.allocateSymbol();
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym = self.getSymbolPtr(sym_loc);
+ sym.* = .{
+ .n_strx = try self.strtab.insert(gpa, "__mh_execute_header"),
.n_type = macho.N_SECT | macho.N_EXT,
.n_sect = 0,
.n_desc = macho.REFERENCED_DYNAMICALLY,
.n_value = 0,
- });
-
- const name = try gpa.dupe(u8, "__mh_execute_header");
- const gop = try self.globals.getOrPut(gpa, name);
- defer if (gop.found_existing) gpa.free(name);
- gop.value_ptr.* = .{
- .sym_index = sym_index,
- .file = null,
};
+
+ const gop = try self.getOrPutGlobalPtr("__mh_execute_header");
+ gop.value_ptr.* = sym_loc;
}
fn createDsoHandleSymbol(self: *MachO) !void {
- const global = self.globals.getPtr("___dso_handle") orelse return;
- const sym = self.getSymbolPtr(global.*);
- if (!sym.undf()) return;
+ const global = self.getGlobalPtr("___dso_handle") orelse return;
+ if (!self.getSymbol(global.*).undf()) return;
const gpa = self.base.allocator;
- const n_strx = try self.strtab.insert(gpa, "___dso_handle");
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = n_strx,
+ const sym_index = try self.allocateSymbol();
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym = self.getSymbolPtr(sym_loc);
+ sym.* = .{
+ .n_strx = try self.strtab.insert(gpa, "___dso_handle"),
.n_type = macho.N_SECT | macho.N_EXT,
.n_sect = 0,
.n_desc = macho.N_WEAK_DEF,
.n_value = 0,
- });
- global.* = .{
- .sym_index = sym_index,
- .file = null,
};
- _ = self.unresolved.swapRemove(@intCast(u32, self.globals.getIndex("___dso_handle").?));
+ global.* = sym_loc;
+ _ = self.unresolved.swapRemove(self.getGlobalIndex("___dso_handle").?);
}
fn resolveGlobalSymbol(self: *MachO, current: SymbolWithLoc) !void {
@@ -2568,19 +2525,14 @@ fn resolveGlobalSymbol(self: *MachO, current: SymbolWithLoc) !void {
const sym = self.getSymbol(current);
const sym_name = self.getSymbolName(current);
- const name = try gpa.dupe(u8, sym_name);
- const global_index = @intCast(u32, self.globals.values().len);
- const gop = try self.globals.getOrPut(gpa, name);
- defer if (gop.found_existing) gpa.free(name);
-
+ const gop = try self.getOrPutGlobalPtr(sym_name);
if (!gop.found_existing) {
gop.value_ptr.* = current;
if (sym.undf() and !sym.tentative()) {
- try self.unresolved.putNoClobber(gpa, global_index, false);
+ try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(sym_name).?, false);
}
return;
}
-
const global = gop.value_ptr.*;
const global_sym = self.getSymbol(global);
@@ -2619,7 +2571,7 @@ fn resolveGlobalSymbol(self: *MachO, current: SymbolWithLoc) !void {
}
if (sym.undf() and !sym.tentative()) return;
- _ = self.unresolved.swapRemove(@intCast(u32, self.globals.getIndex(name).?));
+ _ = self.unresolved.swapRemove(self.getGlobalIndex(sym_name).?);
gop.value_ptr.* = current;
}
@@ -2664,7 +2616,7 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = object_id };
self.resolveGlobalSymbol(sym_loc) catch |err| switch (err) {
error.MultipleSymbolDefinitions => {
- const global = self.globals.get(sym_name).?;
+ const global = self.getGlobal(sym_name).?;
log.err("symbol '{s}' defined multiple times", .{sym_name});
if (global.file) |file| {
log.err(" first definition in '{s}'", .{self.objects.items[file].name});
@@ -2684,7 +2636,8 @@ fn resolveSymbolsInArchives(self: *MachO) !void {
const cpu_arch = self.base.options.target.cpu.arch;
var next_sym: usize = 0;
loop: while (next_sym < self.unresolved.count()) {
- const global = self.globals.values()[self.unresolved.keys()[next_sym]];
+ const global_index = self.unresolved.keys()[next_sym];
+ const global = self.globals.items[global_index];
const sym_name = self.getSymbolName(global);
for (self.archives.items) |archive| {
@@ -2710,10 +2663,11 @@ fn resolveSymbolsInArchives(self: *MachO) !void {
fn resolveSymbolsInDylibs(self: *MachO) !void {
if (self.dylibs.items.len == 0) return;
+ const gpa = self.base.allocator;
var next_sym: usize = 0;
loop: while (next_sym < self.unresolved.count()) {
const global_index = self.unresolved.keys()[next_sym];
- const global = self.globals.values()[global_index];
+ const global = self.globals.items[global_index];
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
@@ -2722,7 +2676,7 @@ fn resolveSymbolsInDylibs(self: *MachO) !void {
const dylib_id = @intCast(u16, id);
if (!self.referenced_dylibs.contains(dylib_id)) {
- try self.referenced_dylibs.putNoClobber(self.base.allocator, dylib_id, {});
+ try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {});
}
const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
@@ -2760,7 +2714,7 @@ fn resolveSymbolsAtLoading(self: *MachO) !void {
var next_sym: usize = 0;
while (next_sym < self.unresolved.count()) {
const global_index = self.unresolved.keys()[next_sym];
- const global = self.globals.values()[global_index];
+ const global = self.globals.items[global_index];
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
@@ -2800,26 +2754,27 @@ fn resolveDyldStubBinder(self: *MachO) !void {
if (self.unresolved.count() == 0) return; // no need for a stub binder if we don't have any imports
const gpa = self.base.allocator;
- const n_strx = try self.strtab.insert(gpa, "dyld_stub_binder");
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = n_strx,
+ const sym_index = try self.allocateSymbol();
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym = self.getSymbolPtr(sym_loc);
+ const sym_name = "dyld_stub_binder";
+ sym.* = .{
+ .n_strx = try self.strtab.insert(gpa, sym_name),
.n_type = macho.N_UNDF,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
- });
- const sym_name = try gpa.dupe(u8, "dyld_stub_binder");
- const global = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- try self.globals.putNoClobber(gpa, sym_name, global);
- const sym = &self.locals.items[sym_index];
+ };
+ const gop = try self.getOrPutGlobalPtr(sym_name);
+ gop.value_ptr.* = sym_loc;
+ const global = gop.value_ptr.*;
for (self.dylibs.items) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
if (!self.referenced_dylibs.contains(dylib_id)) {
- try self.referenced_dylibs.putNoClobber(self.base.allocator, dylib_id, {});
+ try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {});
}
const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
@@ -3050,14 +3005,20 @@ pub fn deinit(self: *MachO) void {
self.stubs_free_list.deinit(gpa);
self.stubs_table.deinit(gpa);
self.strtab.deinit(gpa);
+
self.locals.deinit(gpa);
+ self.globals.deinit(gpa);
self.locals_free_list.deinit(gpa);
+ self.globals_free_list.deinit(gpa);
self.unresolved.deinit(gpa);
- for (self.globals.keys()) |key| {
- gpa.free(key);
+ {
+ var it = self.resolver.keyIterator();
+ while (it.next()) |key_ptr| {
+ gpa.free(key_ptr.*);
+ }
+ self.resolver.deinit(gpa);
}
- self.globals.deinit(gpa);
for (self.objects.items) |*object| {
object.deinit(gpa);
@@ -3211,6 +3172,29 @@ fn allocateSymbol(self: *MachO) !u32 {
return index;
}
+fn allocateGlobal(self: *MachO) !u32 {
+ try self.globals.ensureUnusedCapacity(self.base.allocator, 1);
+
+ const index = blk: {
+ if (self.globals_free_list.popOrNull()) |index| {
+ log.debug(" (reusing global index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating symbol index {d})", .{self.globals.items.len});
+ const index = @intCast(u32, self.globals.items.len);
+ _ = self.globals.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
+
+ self.globals.items[index] = .{
+ .sym_index = 0,
+ .file = null,
+ };
+
+ return index;
+}
+
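Global indices are now stable handles (`unresolved` keys them), so freed slots are reset in place and recycled rather than removed. The lifecycle, sketched against `deleteExport` below (`gi` is illustrative):

    const gi = try self.allocateGlobal();        // may pop from globals_free_list
    self.globals.items[gi] = .{ .sym_index = sym_index, .file = null };
    // ... later, on deleteExport:
    self.globals_free_list.append(gpa, gi) catch {};
    self.globals.items[gi] = .{ .sym_index = 0, .file = null }; // tombstone; index stays valid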
pub fn allocateGotEntry(self: *MachO, target: SymbolWithLoc) !u32 {
const gpa = self.base.allocator;
try self.got_entries.ensureUnusedCapacity(gpa, 1);
@@ -3832,7 +3816,7 @@ pub fn updateDeclExports(
self.resolveGlobalSymbol(sym_loc) catch |err| switch (err) {
error.MultipleSymbolDefinitions => {
- const global = self.globals.get(exp_name).?;
+ const global = self.getGlobal(exp_name).?;
if (sym_loc.sym_index != global.sym_index and global.file != null) {
_ = try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
gpa,
@@ -3869,11 +3853,13 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
};
self.locals_free_list.append(gpa, sym_index) catch {};
- if (self.globals.get(sym_name)) |global| blk: {
- if (global.sym_index != sym_index) break :blk;
- if (global.file != null) break :blk;
- const kv = self.globals.fetchSwapRemove(sym_name);
- gpa.free(kv.?.key);
+ if (self.resolver.fetchRemove(sym_name)) |entry| {
+ defer gpa.free(entry.key);
+ self.globals_free_list.append(gpa, entry.value) catch {};
+ self.globals.items[entry.value] = .{
+ .sym_index = 0,
+ .file = null,
+ };
}
}
@@ -4864,32 +4850,26 @@ pub fn addAtomToSection(self: *MachO, atom: *Atom, sect_id: u8) !void {
pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 {
const gpa = self.base.allocator;
+
const sym_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
- const global_index = @intCast(u32, self.globals.values().len);
- const gop = try self.globals.getOrPut(gpa, sym_name);
- defer if (gop.found_existing) gpa.free(sym_name);
+ defer gpa.free(sym_name);
+ const gop = try self.getOrPutGlobalPtr(sym_name);
+ const global_index = self.getGlobalIndex(sym_name).?;
if (gop.found_existing) {
- // TODO audit this: can we ever reference anything from outside the Zig module?
- assert(gop.value_ptr.file == null);
- return gop.value_ptr.sym_index;
+ return global_index;
}
- const sym_index = @intCast(u32, self.locals.items.len);
- try self.locals.append(gpa, .{
- .n_strx = try self.strtab.insert(gpa, sym_name),
- .n_type = macho.N_UNDF,
- .n_sect = 0,
- .n_desc = 0,
- .n_value = 0,
- });
- gop.value_ptr.* = .{
- .sym_index = sym_index,
- .file = null,
- };
+ const sym_index = try self.allocateSymbol();
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ gop.value_ptr.* = sym_loc;
+
+ const sym = self.getSymbolPtr(sym_loc);
+ sym.n_strx = try self.strtab.insert(gpa, sym_name);
+
try self.unresolved.putNoClobber(gpa, global_index, true);
- return sym_index;
+ return global_index;
}
fn getSegmentAllocBase(self: MachO, indices: []const ?u8) struct { vmaddr: u64, fileoff: u64 } {
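Note the changed contract here: `getGlobalSymbol` now returns an index into `globals` rather than a symbol index. A caller-side sketch (the call site and symbol name are illustrative, not from this patch):

    const global_index = try macho_file.getGlobalSymbol("puts"); // resolved as "_puts"
    const target = macho_file.getGlobalByIndex(global_index);
    const sym = macho_file.getSymbol(target);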
@@ -5055,7 +5035,7 @@ fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
if (self.base.options.output_mode == .Exe) {
for (&[_]SymbolWithLoc{
try self.getEntryPoint(),
- self.globals.get("__mh_execute_header").?,
+ self.getGlobal("__mh_execute_header").?,
}) |global| {
const sym = self.getSymbol(global);
const sym_name = self.getSymbolName(global);
@@ -5068,7 +5048,7 @@ fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
}
} else {
assert(self.base.options.output_mode == .Lib);
- for (self.globals.values()) |global| {
+ for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
@@ -5271,9 +5251,9 @@ fn writeFunctionStarts(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
// We need to sort by address first
var addresses = std.ArrayList(u64).init(gpa);
defer addresses.deinit();
- try addresses.ensureTotalCapacityPrecise(self.globals.count());
+ try addresses.ensureTotalCapacityPrecise(self.globals.items.len);
- for (self.globals.values()) |global| {
+ for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
if (sym.n_desc == N_DESC_GCED) continue;
@@ -5453,7 +5433,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
if (sym.n_desc == N_DESC_GCED) continue; // GCed, skip
const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
- if (self.globals.contains(self.getSymbolName(sym_loc))) continue; // global symbol is either an export or import, skip
+ if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
try locals.append(sym);
}
@@ -5463,7 +5443,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
if (sym.n_desc == N_DESC_GCED) continue; // GCed, skip
const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = @intCast(u32, object_id) };
if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
- if (self.globals.contains(self.getSymbolName(sym_loc))) continue; // global symbol is either an export or import, skip
+ if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(sym_loc));
try locals.append(out_sym);
@@ -5477,7 +5457,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
var exports = std.ArrayList(macho.nlist_64).init(gpa);
defer exports.deinit();
- for (self.globals.values()) |global| {
+ for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (sym.undf()) continue; // import, skip
if (sym.n_desc == N_DESC_GCED) continue; // GCed, skip
@@ -5491,7 +5471,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx {
var imports_table = std.AutoHashMap(SymbolWithLoc, u32).init(gpa);
- for (self.globals.values()) |global| {
+ for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (sym.n_strx == 0) continue; // no name, skip
if (!sym.undf()) continue; // not an import, skip
@@ -5798,6 +5778,49 @@ pub fn getSymbolName(self: *MachO, sym_with_loc: SymbolWithLoc) []const u8 {
}
}
+/// Returns a pointer to the global entry for `name` if one exists.
+pub fn getGlobalPtr(self: *MachO, name: []const u8) ?*SymbolWithLoc {
+ const global_index = self.resolver.get(name) orelse return null;
+ return &self.globals.items[global_index];
+}
+
+/// Returns the global entry for `name` if one exists.
+pub fn getGlobal(self: *const MachO, name: []const u8) ?SymbolWithLoc {
+ const global_index = self.resolver.get(name) orelse return null;
+ return self.globals.items[global_index];
+}
+
+/// Returns the index of the global entry for `name` if one exists.
+pub fn getGlobalIndex(self: *const MachO, name: []const u8) ?u32 {
+ return self.resolver.get(name);
+}
+
+/// Returns the global entry at `index`.
+pub fn getGlobalByIndex(self: *const MachO, index: u32) SymbolWithLoc {
+ assert(index < self.globals.items.len);
+ return self.globals.items[index];
+}
+
+const GetOrPutGlobalPtrResult = struct {
+ found_existing: bool,
+ value_ptr: *SymbolWithLoc,
+};
+
+/// Returns a pointer to the global entry for `name` if one exists.
+/// Otherwise, creates a new global entry for `name` and returns a
+/// pointer to it.
+pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResult {
+ if (self.getGlobalPtr(name)) |ptr| {
+ return GetOrPutGlobalPtrResult{ .found_existing = true, .value_ptr = ptr };
+ }
+ const gpa = self.base.allocator;
+ const global_index = try self.allocateGlobal();
+ const global_name = try gpa.dupe(u8, name);
+ _ = try self.resolver.put(gpa, global_name, global_index);
+ const ptr = &self.globals.items[global_index];
+ return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_with_loc` descriptor.
/// Returns null on failure.
pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
@@ -5834,7 +5857,7 @@ pub fn getTlvPtrAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom
/// Asserts output mode is executable.
pub fn getEntryPoint(self: MachO) error{MissingMainEntrypoint}!SymbolWithLoc {
const entry_name = self.base.options.entry orelse "_main";
- const global = self.globals.get(entry_name) orelse {
+ const global = self.getGlobal(entry_name) orelse {
log.err("entrypoint '{s}' not found", .{entry_name});
return error.MissingMainEntrypoint;
};
@@ -6342,9 +6365,9 @@ fn logSymtab(self: *MachO) void {
}
log.debug("globals table:", .{});
- for (self.globals.keys()) |name, id| {
- const value = self.globals.values()[id];
- log.debug(" {s} => %{d} in object({?d})", .{ name, value.sym_index, value.file });
+ for (self.globals.items) |global| {
+ const name = self.getSymbolName(global);
+ log.debug(" {s} => %{d} in object({?d})", .{ name, global.sym_index, global.file });
}
log.debug("GOT entries:", .{});
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index dd818ea936..5b242a2013 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -272,7 +272,7 @@ pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info,
subtractor = sym_loc;
} else {
const sym_name = context.macho_file.getSymbolName(sym_loc);
- subtractor = context.macho_file.globals.get(sym_name).?;
+ subtractor = context.macho_file.getGlobal(sym_name).?;
}
// Verify that *_SUBTRACTOR is followed by *_UNSIGNED.
if (relocs.len <= i + 1) {
@@ -339,7 +339,7 @@ pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info,
break :target sym_loc;
} else {
const sym_name = context.macho_file.getSymbolName(sym_loc);
- break :target context.macho_file.globals.get(sym_name).?;
+ break :target context.macho_file.getGlobal(sym_name).?;
}
};
const offset = @intCast(u32, rel.r_address - context.base_offset);
@@ -579,7 +579,7 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
// If there is no atom for target, we still need to check for special, atom-less
// symbols such as `___dso_handle`.
const target_name = macho_file.getSymbolName(rel.target);
- assert(macho_file.globals.contains(target_name));
+ assert(macho_file.getGlobal(target_name) != null);
const atomless_sym = macho_file.getSymbol(rel.target);
log.debug(" | atomless target '{s}'", .{target_name});
break :blk atomless_sym.n_value;
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index ffff0fe5f8..a991ba8882 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -480,7 +480,7 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
if (sym.n_desc == MachO.N_DESC_GCED) continue; // GCed, skip
const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (self.base.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
- if (self.base.globals.contains(self.base.getSymbolName(sym_loc))) continue; // global symbol is either an export or import, skip
+ if (self.base.getGlobal(self.base.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.base.getSymbolName(sym_loc));
try locals.append(out_sym);
@@ -489,7 +489,7 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
var exports = std.ArrayList(macho.nlist_64).init(gpa);
defer exports.deinit();
- for (self.base.globals.values()) |global| {
+ for (self.base.globals.items) |global| {
const sym = self.base.getSymbol(global);
if (sym.undf()) continue; // import, skip
if (sym.n_desc == MachO.N_DESC_GCED) continue; // GCed, skip
diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig
index eb2be6e5fe..c8c4950730 100644
--- a/src/link/MachO/dead_strip.zig
+++ b/src/link/MachO/dead_strip.zig
@@ -62,7 +62,7 @@ fn collectRoots(roots: *std.AutoHashMap(*Atom, void), macho_file: *MachO) !void
else => |other| {
assert(other == .Lib);
// Add exports as GC roots
- for (macho_file.globals.values()) |global| {
+ for (macho_file.globals.items) |global| {
const sym = macho_file.getSymbol(global);
if (!sym.sect()) continue;
const atom = macho_file.getAtomForSymbol(global) orelse {
@@ -77,7 +77,7 @@ fn collectRoots(roots: *std.AutoHashMap(*Atom, void), macho_file: *MachO) !void
}
// TODO just a temp until we learn how to parse unwind records
- if (macho_file.globals.get("___gxx_personality_v0")) |global| {
+ if (macho_file.getGlobal("___gxx_personality_v0")) |global| {
if (macho_file.getAtomForSymbol(global)) |atom| {
_ = try roots.getOrPut(atom);
log.debug("adding root", .{});
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 014f6b1934..f969bf1c8b 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -5957,20 +5957,36 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
return bytes[0..i];
}
+/// If a C string literal or char literal in a macro is not valid UTF-8, we need to
+/// escape non-ASCII characters so that the Zig source we output will itself be valid
+/// UTF-8. Note that fmtSliceEscapeLower also treats non-ASCII characters (c > 127)
+/// as non-printable, so they are escaped here as well.
+fn escapeUnprintables(ctx: *Context, m: *MacroCtx) ![]const u8 {
+ const zigified = try zigifyEscapeSequences(ctx, m);
+ if (std.unicode.utf8ValidateSlice(zigified)) return zigified;
+
+ const formatter = std.fmt.fmtSliceEscapeLower(zigified);
+ const encoded_size = @intCast(usize, std.fmt.count("{s}", .{formatter}));
+ var output = try ctx.arena.alloc(u8, encoded_size);
+ return std.fmt.bufPrint(output, "{s}", .{formatter}) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ else => |e| return e,
+ };
+}
+
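For example (illustrative bytes, not taken from the patch): a macro string literal containing a raw 0xFF byte is not valid UTF-8, so after this change it is re-emitted with non-ASCII bytes escaped:

    // C input (raw byte shown symbolically): #define NAME "a<0xFF>b"
    // Emitted Zig:                           pub const NAME = "a\xffb";
    // The string's byte content is unchanged; only the source text is escaped.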
fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
const tok = m.next().?;
const slice = m.slice();
switch (tok) {
.CharLiteral => {
if (slice[0] != '\'' or slice[1] == '\\' or slice.len == 3) {
- return Tag.char_literal.create(c.arena, try zigifyEscapeSequences(c, m));
+ return Tag.char_literal.create(c.arena, try escapeUnprintables(c, m));
} else {
const str = try std.fmt.allocPrint(c.arena, "0x{s}", .{std.fmt.fmtSliceHexLower(slice[1 .. slice.len - 1])});
return Tag.integer_literal.create(c.arena, str);
}
},
.StringLiteral => {
- return Tag.string_literal.create(c.arena, try zigifyEscapeSequences(c, m));
+ return Tag.string_literal.create(c.arena, try escapeUnprintables(c, m));
},
.IntegerLiteral, .FloatLiteral => {
return parseCNumLit(c, m);
diff --git a/src/type.zig b/src/type.zig
index c4f1782954..5de0611667 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2042,6 +2042,9 @@ pub const Type = extern union {
try writer.writeAll("fn(");
for (fn_info.param_types) |param_ty, i| {
if (i != 0) try writer.writeAll(", ");
+ if (fn_info.paramIsComptime(i)) {
+ try writer.writeAll("comptime ");
+ }
if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) {
try writer.writeAll("noalias ");
};
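A small example of the effect on rendered function types (assuming a function type whose first parameter is comptime):

    // before: fn(type, usize) void
    // after:  fn(comptime type, usize) void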