aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/AstGen.zig1100
-rw-r--r--src/Compilation.zig214
-rw-r--r--src/DepTokenizer.zig5
-rw-r--r--src/Module.zig59
-rw-r--r--src/Sema.zig170
-rw-r--r--src/ThreadPool.zig4
-rw-r--r--src/Zir.zig18
-rw-r--r--src/air.zig46
-rw-r--r--src/clang.zig3
-rw-r--r--src/codegen.zig41
-rw-r--r--src/codegen/arm.zig2
-rw-r--r--src/codegen/c.zig20
-rw-r--r--src/codegen/llvm.zig4
-rw-r--r--src/codegen/spirv.zig4
-rw-r--r--src/codegen/wasm.zig13
-rw-r--r--src/codegen/x86_64.zig1
-rw-r--r--src/glibc.zig1
-rw-r--r--src/link.zig6
-rw-r--r--src/link/C.zig12
-rw-r--r--src/link/Coff.zig5
-rw-r--r--src/link/Elf.zig40
-rw-r--r--src/link/MachO.zig337
-rw-r--r--src/link/MachO/Archive.zig60
-rw-r--r--src/link/MachO/DebugSymbols.zig79
-rw-r--r--src/link/MachO/Dylib.zig429
-rw-r--r--src/link/MachO/Object.zig54
-rw-r--r--src/link/MachO/Symbol.zig23
-rw-r--r--src/link/MachO/Zld.zig1372
-rw-r--r--src/link/MachO/bind.zig1
-rw-r--r--src/link/MachO/commands.zig73
-rw-r--r--src/link/MachO/reloc/x86_64.zig1
-rw-r--r--src/link/SpirV.zig8
-rw-r--r--src/link/Wasm.zig13
-rw-r--r--src/link/tapi.zig86
-rw-r--r--src/link/tapi/Tokenizer.zig439
-rw-r--r--src/link/tapi/parse.zig708
-rw-r--r--src/link/tapi/parse/test.zig556
-rw-r--r--src/link/tapi/yaml.zig651
-rw-r--r--src/main.zig123
-rw-r--r--src/mingw.zig2
-rw-r--r--src/musl.zig1
-rw-r--r--src/print_env.zig1
-rw-r--r--src/print_targets.zig1
-rw-r--r--src/register_manager.zig14
-rw-r--r--src/stage1.zig2
-rw-r--r--src/stage1/analyze.cpp15
-rw-r--r--src/stage1/analyze.hpp5
-rw-r--r--src/stage1/astgen.cpp7
-rw-r--r--src/stage1/codegen.cpp10
-rw-r--r--src/stage1/ir.cpp78
-rw-r--r--src/test.zig3
-rw-r--r--src/tracy.zig4
-rw-r--r--src/translate_c.zig161
-rw-r--r--src/translate_c/ast.zig141
-rw-r--r--src/type.zig10
-rw-r--r--src/value.zig28
-rw-r--r--src/zig_clang.cpp6
-rw-r--r--src/zig_clang.h1
-rw-r--r--src/zig_llvm.cpp6
-rw-r--r--src/zig_llvm.h1
60 files changed, 5233 insertions, 2045 deletions
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 54500675df..bcfb5a45b7 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -17,6 +17,16 @@ tree: *const ast.Tree,
instructions: std.MultiArrayList(Zir.Inst) = .{},
extra: ArrayListUnmanaged(u32) = .{},
string_bytes: ArrayListUnmanaged(u8) = .{},
+/// Tracks the current byte offset within the source file.
+/// Used to populate line deltas in the ZIR. AstGen maintains
+/// this "cursor" throughout the entire AST lowering process in order
+/// to avoid starting over the line/column scan for every declaration, which
+/// would be O(N^2).
+source_offset: u32 = 0,
+/// Tracks the current line of `source_offset`.
+source_line: u32 = 0,
+/// Tracks the current column of `source_offset`.
+source_column: u32 = 0,
/// Used for temporary allocations; freed after AstGen is complete.
/// The resulting ZIR code has no references to anything in this arena.
arena: *Allocator,
@@ -206,7 +216,6 @@ pub const ResultLoc = union(enum) {
};
fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy {
- var elide_store_to_block_ptr_instructions = false;
switch (rl) {
// In this branch there will not be any store_to_block_ptr instructions.
.discard, .none, .none_or_ref, .ty, .ref => return .{
@@ -482,61 +491,61 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.assign => {
try assign(gz, scope, node);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_bit_shift_left => {
try assignShift(gz, scope, node, .shl);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_bit_shift_right => {
try assignShift(gz, scope, node, .shr);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_bit_and => {
try assignOp(gz, scope, node, .bit_and);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_bit_or => {
try assignOp(gz, scope, node, .bit_or);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_bit_xor => {
try assignOp(gz, scope, node, .xor);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_div => {
try assignOp(gz, scope, node, .div);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_sub => {
try assignOp(gz, scope, node, .sub);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_sub_wrap => {
try assignOp(gz, scope, node, .subwrap);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_mod => {
try assignOp(gz, scope, node, .mod_rem);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_add => {
try assignOp(gz, scope, node, .add);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_add_wrap => {
try assignOp(gz, scope, node, .addwrap);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_mul => {
try assignOp(gz, scope, node, .mul);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.assign_mul_wrap => {
try assignOp(gz, scope, node, .mulwrap);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
// zig fmt: off
@@ -551,7 +560,23 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap),
.div => return simpleBinOp(gz, scope, rl, node, .div),
.mod => return simpleBinOp(gz, scope, rl, node, .mod_rem),
- .bit_and => return simpleBinOp(gz, scope, rl, node, .bit_and),
+ .bit_and => {
+ const current_ampersand_token = main_tokens[node];
+ if (token_tags[current_ampersand_token + 1] == .ampersand) {
+ const token_starts = tree.tokens.items(.start);
+ const current_token_offset = token_starts[current_ampersand_token];
+ const next_token_offset = token_starts[current_ampersand_token + 1];
+ if (current_token_offset + 1 == next_token_offset) {
+ return astgen.failTok(
+ current_ampersand_token,
+ "`&&` is invalid; note that `and` is boolean AND",
+ .{},
+ );
+ }
+ }
+
+ return simpleBinOp(gz, scope, rl, node, .bit_and);
+ },
.bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or),
.bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),
@@ -582,10 +607,10 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.asm_simple => return asmExpr(gz, scope, rl, node, tree.asmSimple(node)),
.@"asm" => return asmExpr(gz, scope, rl, node, tree.asmFull(node)),
- .string_literal => return stringLiteral(gz, scope, rl, node),
- .multiline_string_literal => return multilineStringLiteral(gz, scope, rl, node),
+ .string_literal => return stringLiteral(gz, rl, node),
+ .multiline_string_literal => return multilineStringLiteral(gz, rl, node),
- .integer_literal => return integerLiteral(gz, scope, rl, node),
+ .integer_literal => return integerLiteral(gz, rl, node),
// zig fmt: on
.builtin_call_two, .builtin_call_two_comma => {
@@ -625,7 +650,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
},
.@"return" => return ret(gz, scope, node),
.field_access => return fieldAccess(gz, scope, rl, node),
- .float_literal => return floatLiteral(gz, scope, rl, node),
+ .float_literal => return floatLiteral(gz, rl, node),
.if_simple => return ifExpr(gz, scope, rl, node, tree.ifSimple(node)),
.@"if" => return ifExpr(gz, scope, rl, node, tree.ifFull(node)),
@@ -644,7 +669,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.lhs = lhs,
.start = start,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.slice => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
@@ -656,7 +681,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.start = start,
.end = end,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.slice_sentinel => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
@@ -670,7 +695,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.end = end,
.sentinel = sentinel,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.deref => {
@@ -679,22 +704,22 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.ref, .none_or_ref => return lhs,
else => {
const result = try gz.addUnNode(.load, lhs, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
}
},
.address_of => {
const result = try expr(gz, scope, .ref, node_datas[node].lhs);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
- .undefined_literal => return rvalue(gz, scope, rl, .undef, node),
- .true_literal => return rvalue(gz, scope, rl, .bool_true, node),
- .false_literal => return rvalue(gz, scope, rl, .bool_false, node),
- .null_literal => return rvalue(gz, scope, rl, .null_value, node),
+ .undefined_literal => return rvalue(gz, rl, .undef, node),
+ .true_literal => return rvalue(gz, rl, .bool_true, node),
+ .false_literal => return rvalue(gz, rl, .bool_false, node),
+ .null_literal => return rvalue(gz, rl, .null_value, node),
.optional_type => {
const operand = try typeExpr(gz, scope, node_datas[node].lhs);
const result = try gz.addUnNode(.optional_type, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.unwrap_optional => switch (rl) {
.ref => return gz.addUnNode(
@@ -702,7 +727,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
try expr(gz, scope, .ref, node_datas[node].lhs),
node,
),
- else => return rvalue(gz, scope, rl, try gz.addUnNode(
+ else => return rvalue(gz, rl, try gz.addUnNode(
.optional_payload_safe,
try expr(gz, scope, .none, node_datas[node].lhs),
node,
@@ -722,13 +747,13 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
return blockExpr(gz, scope, rl, node, statements);
},
- .enum_literal => return simpleStrTok(gz, scope, rl, main_tokens[node], node, .enum_literal),
- .error_value => return simpleStrTok(gz, scope, rl, node_datas[node].rhs, node, .error_value),
- .anyframe_literal => return rvalue(gz, scope, rl, .anyframe_type, node),
+ .enum_literal => return simpleStrTok(gz, rl, main_tokens[node], node, .enum_literal),
+ .error_value => return simpleStrTok(gz, rl, node_datas[node].rhs, node, .error_value),
+ .anyframe_literal => return rvalue(gz, rl, .anyframe_type, node),
.anyframe_type => {
const return_type = try typeExpr(gz, scope, node_datas[node].rhs);
const result = try gz.addUnNode(.anyframe_type, return_type, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.@"catch" => {
const catch_token = main_tokens[node];
@@ -822,14 +847,14 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.grouped_expression => return expr(gz, scope, rl, node_datas[node].lhs),
.array_type => return arrayType(gz, scope, rl, node),
.array_type_sentinel => return arrayTypeSentinel(gz, scope, rl, node),
- .char_literal => return charLiteral(gz, scope, rl, node),
- .error_set_decl => return errorSetDecl(gz, scope, rl, node),
+ .char_literal => return charLiteral(gz, rl, node),
+ .error_set_decl => return errorSetDecl(gz, rl, node),
.array_access => return arrayAccess(gz, scope, rl, node),
.@"comptime" => return comptimeExprAst(gz, scope, rl, node),
.@"switch", .switch_comma => return switchExpr(gz, scope, rl, node),
.@"nosuspend" => return nosuspendExpr(gz, scope, rl, node),
- .@"suspend" => return suspendExpr(gz, scope, rl, node),
+ .@"suspend" => return suspendExpr(gz, scope, node),
.@"await" => return awaitExpr(gz, scope, rl, node),
.@"resume" => return resumeExpr(gz, scope, rl, node),
@@ -889,7 +914,6 @@ fn nosuspendExpr(
node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
- const gpa = astgen.gpa;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
const body_node = node_datas[node].lhs;
@@ -902,13 +926,12 @@ fn nosuspendExpr(
gz.nosuspend_node = node;
const result = try expr(gz, scope, rl, body_node);
gz.nosuspend_node = 0;
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn suspendExpr(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -964,7 +987,7 @@ fn awaitExpr(
const operand = try expr(gz, scope, .none, rhs_node);
const tag: Zir.Inst.Tag = if (gz.nosuspend_node != 0) .await_nosuspend else .@"await";
const result = try gz.addUnNode(tag, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn resumeExpr(
@@ -979,7 +1002,7 @@ fn resumeExpr(
const rhs_node = node_datas[node].lhs;
const operand = try expr(gz, scope, .none, rhs_node);
const result = try gz.addUnNode(.@"resume", operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn fnProtoExpr(
@@ -1085,7 +1108,7 @@ fn fnProtoExpr(
.is_test = false,
.is_extern = false,
});
- return rvalue(gz, scope, rl, result, fn_proto.ast.proto_node);
+ return rvalue(gz, rl, result, fn_proto.ast.proto_node);
}
fn arrayInitExpr(
@@ -1097,7 +1120,6 @@ fn arrayInitExpr(
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
- const gpa = astgen.gpa;
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
@@ -1157,32 +1179,32 @@ fn arrayInitExpr(
},
.ref => {
if (types.array != .none) {
- return arrayInitExprRlTy(gz, scope, rl, node, array_init.ast.elements, types.array, types.elem, .array_init_ref);
+ return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init_ref);
} else {
- return arrayInitExprRlNone(gz, scope, rl, node, array_init.ast.elements, .array_init_anon_ref);
+ return arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon_ref);
}
},
.none, .none_or_ref => {
if (types.array != .none) {
- return arrayInitExprRlTy(gz, scope, rl, node, array_init.ast.elements, types.array, types.elem, .array_init);
+ return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init);
} else {
- return arrayInitExprRlNone(gz, scope, rl, node, array_init.ast.elements, .array_init_anon);
+ return arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon);
}
},
.ty => |ty_inst| {
if (types.array != .none) {
- const result = try arrayInitExprRlTy(gz, scope, rl, node, array_init.ast.elements, types.array, types.elem, .array_init);
- return rvalue(gz, scope, rl, result, node);
+ const result = try arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init);
+ return rvalue(gz, rl, result, node);
} else {
const elem_type = try gz.addUnNode(.elem_type, ty_inst, node);
- return arrayInitExprRlTy(gz, scope, rl, node, array_init.ast.elements, ty_inst, elem_type, .array_init);
+ return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, elem_type, .array_init);
}
},
.ptr, .inferred_ptr => |ptr_inst| {
- return arrayInitExprRlPtr(gz, scope, rl, node, array_init.ast.elements, ptr_inst);
+ return arrayInitExprRlPtr(gz, scope, node, array_init.ast.elements, ptr_inst);
},
.block_ptr => |block_gz| {
- return arrayInitExprRlPtr(gz, scope, rl, node, array_init.ast.elements, block_gz.rl_ptr);
+ return arrayInitExprRlPtr(gz, scope, node, array_init.ast.elements, block_gz.rl_ptr);
},
}
}
@@ -1190,7 +1212,6 @@ fn arrayInitExpr(
fn arrayInitExprRlNone(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
elements: []const ast.Node.Index,
tag: Zir.Inst.Tag,
@@ -1213,10 +1234,8 @@ fn arrayInitExprRlNone(
fn arrayInitExprRlTy(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
elements: []const ast.Node.Index,
- array_ty_inst: Zir.Inst.Ref,
elem_ty_inst: Zir.Inst.Ref,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
@@ -1241,7 +1260,6 @@ fn arrayInitExprRlTy(
fn arrayInitExprRlPtr(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
elements: []const ast.Node.Index,
result_ptr: Zir.Inst.Ref,
@@ -1277,11 +1295,10 @@ fn structInitExpr(
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
- const gpa = astgen.gpa;
if (struct_init.ast.fields.len == 0) {
if (struct_init.ast.type_expr == 0) {
- return rvalue(gz, scope, rl, .empty_struct, node);
+ return rvalue(gz, rl, .empty_struct, node);
}
array: {
const node_tags = tree.nodes.items(.tag);
@@ -1303,15 +1320,17 @@ fn structInitExpr(
break :blk try gz.addArrayTypeSentinel(.zero_usize, elem_type, sentinel);
};
const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
}
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
const result = try gz.addUnNode(.struct_init_empty, ty_inst, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
switch (rl) {
.discard => {
+ if (struct_init.ast.type_expr != 0)
+ _ = try typeExpr(gz, scope, struct_init.ast.type_expr);
for (struct_init.ast.fields) |field_init| {
_ = try expr(gz, scope, .discard, field_init);
}
@@ -1320,36 +1339,35 @@ fn structInitExpr(
.ref => {
if (struct_init.ast.type_expr != 0) {
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
- return structInitExprRlTy(gz, scope, rl, node, struct_init, ty_inst, .struct_init_ref);
+ return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init_ref);
} else {
- return structInitExprRlNone(gz, scope, rl, node, struct_init, .struct_init_anon_ref);
+ return structInitExprRlNone(gz, scope, node, struct_init, .struct_init_anon_ref);
}
},
.none, .none_or_ref => {
if (struct_init.ast.type_expr != 0) {
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
- return structInitExprRlTy(gz, scope, rl, node, struct_init, ty_inst, .struct_init);
+ return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
} else {
- return structInitExprRlNone(gz, scope, rl, node, struct_init, .struct_init_anon);
+ return structInitExprRlNone(gz, scope, node, struct_init, .struct_init_anon);
}
},
.ty => |ty_inst| {
if (struct_init.ast.type_expr == 0) {
- return structInitExprRlTy(gz, scope, rl, node, struct_init, ty_inst, .struct_init);
+ return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
}
const inner_ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
- const result = try structInitExprRlTy(gz, scope, rl, node, struct_init, inner_ty_inst, .struct_init);
- return rvalue(gz, scope, rl, result, node);
+ const result = try structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init);
+ return rvalue(gz, rl, result, node);
},
- .ptr, .inferred_ptr => |ptr_inst| return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_inst),
- .block_ptr => |block_gz| return structInitExprRlPtr(gz, scope, rl, node, struct_init, block_gz.rl_ptr),
+ .ptr, .inferred_ptr => |ptr_inst| return structInitExprRlPtr(gz, scope, node, struct_init, ptr_inst),
+ .block_ptr => |block_gz| return structInitExprRlPtr(gz, scope, node, struct_init, block_gz.rl_ptr),
}
}
fn structInitExprRlNone(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
struct_init: ast.full.StructInit,
tag: Zir.Inst.Tag,
@@ -1384,7 +1402,6 @@ fn structInitExprRlNone(
fn structInitExprRlPtr(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
struct_init: ast.full.StructInit,
result_ptr: Zir.Inst.Ref,
@@ -1396,6 +1413,9 @@ fn structInitExprRlPtr(
const field_ptr_list = try gpa.alloc(Zir.Inst.Index, struct_init.ast.fields.len);
defer gpa.free(field_ptr_list);
+ if (struct_init.ast.type_expr != 0)
+ _ = try typeExpr(gz, scope, struct_init.ast.type_expr);
+
for (struct_init.ast.fields) |field_init, i| {
const name_token = tree.firstToken(field_init) - 2;
const str_index = try astgen.identAsString(name_token);
@@ -1416,7 +1436,6 @@ fn structInitExprRlPtr(
fn structInitExprRlTy(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
struct_init: ast.full.StructInit,
ty_inst: Zir.Inst.Ref,
@@ -1641,8 +1660,8 @@ fn blockExpr(
return labeledBlockExpr(gz, scope, rl, block_node, statements, .block);
}
- try blockExprStmts(gz, scope, block_node, statements);
- return rvalue(gz, scope, rl, .void_value, block_node);
+ try blockExprStmts(gz, scope, statements);
+ return rvalue(gz, rl, .void_value, block_node);
}
fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: ast.TokenIndex) !void {
@@ -1654,9 +1673,6 @@ fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: ast.Toke
const gen_zir = scope.cast(GenZir).?;
if (gen_zir.label) |prev_label| {
if (try astgen.tokenIdentEql(label, prev_label.token)) {
- const tree = astgen.tree;
- const main_tokens = tree.nodes.items(.main_token);
-
const label_name = try astgen.identifierTokenString(label);
return astgen.failTokNotes(label, "redefinition of label '{s}'", .{
label_name,
@@ -1719,7 +1735,7 @@ fn labeledBlockExpr(
defer block_scope.labeled_breaks.deinit(astgen.gpa);
defer block_scope.labeled_store_to_block_ptr_list.deinit(astgen.gpa);
- try blockExprStmts(&block_scope, &block_scope.base, block_node, statements);
+ try blockExprStmts(&block_scope, &block_scope.base, statements);
if (!block_scope.label.?.used) {
return astgen.failTok(label_token, "unused block label", .{});
@@ -1755,21 +1771,15 @@ fn labeledBlockExpr(
const block_ref = gz.indexToRef(block_inst);
switch (rl) {
.ref => return block_ref,
- else => return rvalue(gz, parent_scope, rl, block_ref, block_node),
+ else => return rvalue(gz, rl, block_ref, block_node),
}
},
}
}
-fn blockExprStmts(
- gz: *GenZir,
- parent_scope: *Scope,
- node: ast.Node.Index,
- statements: []const ast.Node.Index,
-) !void {
+fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const ast.Node.Index) !void {
const astgen = gz.astgen;
const tree = astgen.tree;
- const main_tokens = tree.nodes.items(.main_token);
const node_tags = tree.nodes.items(.tag);
var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa);
@@ -1784,8 +1794,8 @@ fn blockExprStmts(
.simple_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.simpleVarDecl(statement)),
.aligned_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.alignedVarDecl(statement)),
- .@"defer" => scope = try deferStmt(gz, scope, statement, &block_arena.allocator, .defer_normal),
- .@"errdefer" => scope = try deferStmt(gz, scope, statement, &block_arena.allocator, .defer_error),
+ .@"defer" => scope = try makeDeferScope(scope, statement, &block_arena.allocator, .defer_normal),
+ .@"errdefer" => scope = try makeDeferScope(scope, statement, &block_arena.allocator, .defer_error),
.assign => try assign(gz, scope, statement),
@@ -1810,6 +1820,7 @@ fn blockExprStmts(
}
try genDefers(gz, parent_scope, scope, .none);
+ try checkUsed(gz, parent_scope, scope);
}
fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) InnerError!void {
@@ -2103,6 +2114,7 @@ fn genDefers(
inner_scope: *Scope,
err_code: Zir.Inst.Ref,
) InnerError!void {
+ _ = err_code;
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -2138,8 +2150,49 @@ fn genDefers(
}
}
-fn deferStmt(
+fn checkUsed(
gz: *GenZir,
+ outer_scope: *Scope,
+ inner_scope: *Scope,
+) InnerError!void {
+ const astgen = gz.astgen;
+
+ var scope = inner_scope;
+ while (scope != outer_scope) {
+ switch (scope.tag) {
+ .gen_zir => scope = scope.cast(GenZir).?.parent,
+ .local_val => {
+ const s = scope.cast(Scope.LocalVal).?;
+ switch (s.used) {
+ .used => {},
+ .fn_param => return astgen.failTok(s.token_src, "unused function parameter", .{}),
+ .constant => return astgen.failTok(s.token_src, "unused local constant", .{}),
+ .variable => unreachable,
+ .loop_index => unreachable,
+ .capture => return astgen.failTok(s.token_src, "unused capture", .{}),
+ }
+ scope = s.parent;
+ },
+ .local_ptr => {
+ const s = scope.cast(Scope.LocalPtr).?;
+ switch (s.used) {
+ .used => {},
+ .fn_param => unreachable,
+ .constant => return astgen.failTok(s.token_src, "unused local constant", .{}),
+ .variable => return astgen.failTok(s.token_src, "unused local variable", .{}),
+ .loop_index => return astgen.failTok(s.token_src, "unused loop index capture", .{}),
+ .capture => unreachable,
+ }
+ scope = s.parent;
+ },
+ .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
+ .namespace => unreachable,
+ .top => unreachable,
+ }
+ }
+}
+
+fn makeDeferScope(
scope: *Scope,
node: ast.Node.Index,
block_arena: *Allocator,
@@ -2258,6 +2311,7 @@ fn varDecl(
.name = ident_name,
.inst = init_inst,
.token_src = name_token,
+ .used = .constant,
};
return &sub_scope.base;
}
@@ -2325,6 +2379,7 @@ fn varDecl(
.name = ident_name,
.inst = init_inst,
.token_src = name_token,
+ .used = .constant,
};
return &sub_scope.base;
}
@@ -2353,7 +2408,8 @@ fn varDecl(
.name = ident_name,
.ptr = init_scope.rl_ptr,
.token_src = name_token,
- .is_comptime = true,
+ .maybe_comptime = true,
+ .used = .constant,
};
return &sub_scope.base;
},
@@ -2398,7 +2454,7 @@ fn varDecl(
resolve_inferred_alloc = alloc;
break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc } };
};
- const init_inst = try expr(gz, scope, var_data.result_loc, var_decl.ast.init_node);
+ _ = try expr(gz, scope, var_data.result_loc, var_decl.ast.init_node);
if (resolve_inferred_alloc != .none) {
_ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
}
@@ -2409,7 +2465,8 @@ fn varDecl(
.name = ident_name,
.ptr = var_data.alloc,
.token_src = name_token,
- .is_comptime = is_comptime,
+ .maybe_comptime = is_comptime,
+ .used = .variable,
};
return &sub_scope.base;
},
@@ -2425,16 +2482,18 @@ fn emitDbgNode(gz: *GenZir, node: ast.Node.Index) !void {
const astgen = gz.astgen;
const tree = astgen.tree;
- const node_tags = tree.nodes.items(.tag);
+ const source = tree.source;
const token_starts = tree.tokens.items(.start);
- const decl_start = token_starts[tree.firstToken(gz.decl_node_index)];
const node_start = token_starts[tree.firstToken(node)];
- const source = tree.source[decl_start..node_start];
- const loc = std.zig.findLineColumn(source, source.len);
+
+ astgen.advanceSourceCursor(source, node_start);
+ const line = @intCast(u32, astgen.source_line);
+ const column = @intCast(u32, astgen.source_column);
+
_ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
.dbg_stmt = .{
- .line = @intCast(u32, loc.line),
- .column = @intCast(u32, loc.column),
+ .line = line,
+ .column = column,
},
} });
}
@@ -2514,7 +2573,7 @@ fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) Inne
const operand = try expr(gz, scope, bool_rl, node_datas[node].lhs);
const result = try gz.addUnNode(.bool_not, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn bitNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
@@ -2524,7 +2583,7 @@ fn bitNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) Inner
const operand = try expr(gz, scope, .none, node_datas[node].lhs);
const result = try gz.addUnNode(.bit_not, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn negation(
@@ -2540,7 +2599,7 @@ fn negation(
const operand = try expr(gz, scope, .none, node_datas[node].lhs);
const result = try gz.addUnNode(tag, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn ptrType(
@@ -2550,9 +2609,6 @@ fn ptrType(
node: ast.Node.Index,
ptr_info: ast.full.PtrType,
) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- const tree = astgen.tree;
-
const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type);
const simple = ptr_info.ast.align_node == 0 and
@@ -2569,7 +2625,7 @@ fn ptrType(
.elem_type = elem_type,
},
} });
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
var sentinel_ref: Zir.Inst.Ref = .none;
@@ -2629,7 +2685,7 @@ fn ptrType(
} });
gz.instructions.appendAssumeCapacity(new_index);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
@@ -2649,7 +2705,7 @@ fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Z
const elem_type = try typeExpr(gz, scope, node_datas[node].rhs);
const result = try gz.addBin(.array_type, len, elem_type);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
@@ -2671,7 +2727,7 @@ fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.I
const sentinel = try expr(gz, scope, .{ .ty = elem_type }, extra.sentinel);
const result = try gz.addArrayTypeSentinel(len, elem_type, sentinel);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
const WipDecls = struct {
@@ -2902,6 +2958,10 @@ fn fnDecl(
const name_token = param.name_token orelse {
return astgen.failNode(param.type_expr, "missing parameter name", .{});
};
+ if (param.type_expr != 0)
+ _ = try typeExpr(&fn_gz, params_scope, param.type_expr);
+ if (mem.eql(u8, "_", tree.tokenSlice(name_token)))
+ continue;
const param_name = try astgen.identAsString(name_token);
// Create an arg instruction. This is needed to emit a semantic analysis
// error for shadowing decls.
@@ -2914,16 +2974,19 @@ fn fnDecl(
.name = param_name,
.inst = arg_inst,
.token_src = name_token,
+ .used = .fn_param,
};
params_scope = &sub_scope.base;
// Additionally put the param name into `string_bytes` and reference it with
// `extra` so that we have access to the data in codegen, for debug info.
const str_index = try astgen.identAsString(name_token);
- astgen.extra.appendAssumeCapacity(str_index);
+ try astgen.extra.append(astgen.gpa, str_index);
}
+ _ = try typeExpr(&fn_gz, params_scope, fn_proto.ast.return_type);
_ = try expr(&fn_gz, params_scope, .none, body_node);
+ try checkUsed(gz, &fn_gz.base, params_scope);
}
const need_implicit_ret = blk: {
@@ -3508,6 +3571,10 @@ fn structDeclInner(
const field_name = try astgen.identAsString(member.ast.name_token);
fields_data.appendAssumeCapacity(field_name);
+ if (member.ast.type_expr == 0) {
+ return astgen.failTok(member.ast.name_token, "struct field missing type", .{});
+ }
+
const field_type: Zir.Inst.Ref = if (node_tags[member.ast.type_expr] == .@"anytype")
.none
else
@@ -3616,7 +3683,7 @@ fn unionDeclInner(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = &gz.base };
+ var namespace: Scope.Namespace = .{ .parent = scope };
defer namespace.decls.deinit(gpa);
var wip_decls: WipDecls = .{};
@@ -3900,7 +3967,7 @@ fn containerDecl(
assert(arg_inst == .none);
const result = try structDeclInner(gz, scope, node, container_decl, layout);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.keyword_union => {
const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) {
@@ -3912,7 +3979,7 @@ fn containerDecl(
const have_auto_enum = container_decl.ast.enum_token != null;
const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, arg_inst, have_auto_enum);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.keyword_enum => {
if (container_decl.layout_token) |t| {
@@ -4017,7 +4084,7 @@ fn containerDecl(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = &gz.base };
+ var namespace: Scope.Namespace = .{ .parent = scope };
defer namespace.decls.deinit(gpa);
var wip_decls: WipDecls = .{};
@@ -4241,20 +4308,18 @@ fn containerDecl(
astgen.extra.appendAssumeCapacity(cur_bit_bag);
astgen.extra.appendSliceAssumeCapacity(fields_data.items);
- return rvalue(gz, scope, rl, gz.indexToRef(decl_inst), node);
+ return rvalue(gz, rl, gz.indexToRef(decl_inst), node);
},
.keyword_opaque => {
- var namespace: Scope.Namespace = .{ .parent = &gz.base };
+ var namespace: Scope.Namespace = .{ .parent = scope };
defer namespace.decls.deinit(gpa);
var wip_decls: WipDecls = .{};
defer wip_decls.deinit(gpa);
for (container_decl.ast.members) |member_node| {
- const member = switch (node_tags[member_node]) {
- .container_field_init => tree.containerFieldInit(member_node),
- .container_field_align => tree.containerFieldAlign(member_node),
- .container_field => tree.containerField(member_node),
+ switch (node_tags[member_node]) {
+ .container_field_init, .container_field_align, .container_field => {},
.fn_decl => {
const fn_proto = node_datas[member_node].lhs;
@@ -4375,7 +4440,7 @@ fn containerDecl(
continue;
},
else => unreachable,
- };
+ }
}
{
const empty_slot_count = WipDecls.fields_per_u32 - (wip_decls.decl_index % WipDecls.fields_per_u32);
@@ -4404,18 +4469,13 @@ fn containerDecl(
}
astgen.extra.appendSliceAssumeCapacity(wip_decls.payload.items);
- return rvalue(gz, scope, rl, gz.indexToRef(decl_inst), node);
+ return rvalue(gz, rl, gz.indexToRef(decl_inst), node);
},
else => unreachable,
}
}
-fn errorSetDecl(
- gz: *GenZir,
- scope: *Scope,
- rl: ResultLoc,
- node: ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
+fn errorSetDecl(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
@@ -4443,16 +4503,11 @@ fn errorSetDecl(
}
}
- const tag: Zir.Inst.Tag = switch (gz.anon_name_strategy) {
- .parent => .error_set_decl,
- .anon => .error_set_decl_anon,
- .func => .error_set_decl_func,
- };
const result = try gz.addPlNode(.error_set_decl, node, Zir.Inst.ErrorSetDecl{
.fields_len = @intCast(u32, field_names.items.len),
});
try astgen.extra.appendSlice(gpa, field_names.items);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn tryExpr(
@@ -4463,7 +4518,6 @@ fn tryExpr(
operand_node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
- const tree = astgen.tree;
const fn_block = astgen.fn_block orelse {
return astgen.failNode(node, "invalid 'try' outside function scope", .{});
@@ -4512,12 +4566,11 @@ fn tryExpr(
const unwrapped_payload = try else_scope.addUnNode(err_ops[2], operand, node);
const else_result = switch (rl) {
.ref => unwrapped_payload,
- else => try rvalue(&else_scope, &else_scope.base, block_scope.break_result_loc, unwrapped_payload, node),
+ else => try rvalue(&else_scope, block_scope.break_result_loc, unwrapped_payload, node),
};
return finishThenElseBlock(
parent_gz,
- scope,
rl,
node,
&block_scope,
@@ -4525,8 +4578,6 @@ fn tryExpr(
&else_scope,
condbr,
cond,
- node,
- node,
then_result,
else_result,
block,
@@ -4587,12 +4638,15 @@ fn orelseCatchExpr(
.name = err_name,
.inst = try then_scope.addUnNode(unwrap_code_op, operand, node),
.token_src = payload,
+ .used = .capture,
};
break :blk &err_val_scope.base;
};
block_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, rhs);
+ try checkUsed(parent_gz, &then_scope.base, then_sub_scope);
+
// We hold off on the break instructions as well as copying the then/else
// instructions into place until we know whether to keep store_to_block_ptr
// instructions or not.
@@ -4604,12 +4658,11 @@ fn orelseCatchExpr(
const unwrapped_payload = try else_scope.addUnNode(unwrap_op, operand, node);
const else_result = switch (rl) {
.ref => unwrapped_payload,
- else => try rvalue(&else_scope, &else_scope.base, block_scope.break_result_loc, unwrapped_payload, node),
+ else => try rvalue(&else_scope, block_scope.break_result_loc, unwrapped_payload, node),
};
return finishThenElseBlock(
parent_gz,
- scope,
rl,
node,
&block_scope,
@@ -4617,8 +4670,6 @@ fn orelseCatchExpr(
&else_scope,
condbr,
cond,
- node,
- node,
then_result,
else_result,
block,
@@ -4629,7 +4680,6 @@ fn orelseCatchExpr(
fn finishThenElseBlock(
parent_gz: *GenZir,
- parent_scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
block_scope: *GenZir,
@@ -4637,8 +4687,6 @@ fn finishThenElseBlock(
else_scope: *GenZir,
condbr: Zir.Inst.Index,
cond: Zir.Inst.Ref,
- then_src: ast.Node.Index,
- else_src: ast.Node.Index,
then_result: Zir.Inst.Ref,
else_result: Zir.Inst.Ref,
main_block: Zir.Inst.Index,
@@ -4648,7 +4696,6 @@ fn finishThenElseBlock(
// We now have enough information to decide whether the result instruction should
// be communicated via result location pointer or break instructions.
const strat = rl.strategy(block_scope);
- const astgen = block_scope.astgen;
switch (strat.tag) {
.break_void => {
if (!parent_gz.refIsNoReturn(then_result)) {
@@ -4681,7 +4728,7 @@ fn finishThenElseBlock(
const block_ref = parent_gz.indexToRef(main_block);
switch (rl) {
.ref => return block_ref,
- else => return rvalue(parent_gz, parent_scope, rl, block_ref, node),
+ else => return rvalue(parent_gz, rl, block_ref, node),
}
},
}
@@ -4717,7 +4764,7 @@ fn fieldAccess(
.lhs = try expr(gz, scope, .ref, object_node),
.field_name_start = str_index,
}),
- else => return rvalue(gz, scope, rl, try gz.addPlNode(.field_val, node, Zir.Inst.Field{
+ else => return rvalue(gz, rl, try gz.addPlNode(.field_val, node, Zir.Inst.Field{
.lhs = try expr(gz, scope, .none_or_ref, object_node),
.field_name_start = str_index,
}), node),
@@ -4732,7 +4779,6 @@ fn arrayAccess(
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
- const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
switch (rl) {
.ref => return gz.addBin(
@@ -4740,7 +4786,7 @@ fn arrayAccess(
try expr(gz, scope, .ref, node_datas[node].lhs),
try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
),
- else => return rvalue(gz, scope, rl, try gz.addBin(
+ else => return rvalue(gz, rl, try gz.addBin(
.elem_val,
try expr(gz, scope, .none_or_ref, node_datas[node].lhs),
try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
@@ -4763,12 +4809,11 @@ fn simpleBinOp(
.lhs = try expr(gz, scope, .none, node_datas[node].lhs),
.rhs = try expr(gz, scope, .none, node_datas[node].rhs),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn simpleStrTok(
gz: *GenZir,
- scope: *Scope,
rl: ResultLoc,
ident_token: ast.TokenIndex,
node: ast.Node.Index,
@@ -4777,7 +4822,7 @@ fn simpleStrTok(
const astgen = gz.astgen;
const str_index = try astgen.identAsString(ident_token);
const result = try gz.addStrTok(op_inst_tag, str_index, ident_token);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn boolBinOp(
@@ -4803,7 +4848,7 @@ fn boolBinOp(
try rhs_scope.setBoolBrBody(bool_br);
const block_ref = gz.indexToRef(bool_br);
- return rvalue(gz, scope, rl, block_ref, node);
+ return rvalue(gz, rl, block_ref, node);
}
fn ifExpr(
@@ -4830,7 +4875,7 @@ fn ifExpr(
inst: Zir.Inst.Ref,
bool_bit: Zir.Inst.Ref,
} = c: {
- if (if_full.error_token) |error_token| {
+ if (if_full.error_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const err_union = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err;
@@ -4838,7 +4883,7 @@ fn ifExpr(
.inst = err_union,
.bool_bit = try block_scope.addUnNode(tag, err_union, node),
};
- } else if (if_full.payload_token) |payload_token| {
+ } else if (if_full.payload_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const optional = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
@@ -4867,27 +4912,38 @@ fn ifExpr(
var payload_val_scope: Scope.LocalVal = undefined;
const then_sub_scope = s: {
- if (if_full.error_token) |error_token| {
- const tag: Zir.Inst.Tag = if (payload_is_ref)
- .err_union_payload_unsafe_ptr
- else
- .err_union_payload_unsafe;
- const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
- const ident_name = try astgen.identAsString(error_token);
- payload_val_scope = .{
- .parent = &then_scope.base,
- .gen_zir = &then_scope,
- .name = ident_name,
- .inst = payload_inst,
- .token_src = error_token,
- };
- break :s &payload_val_scope.base;
+ if (if_full.error_token != null) {
+ if (if_full.payload_token) |payload_token| {
+ const tag: Zir.Inst.Tag = if (payload_is_ref)
+ .err_union_payload_unsafe_ptr
+ else
+ .err_union_payload_unsafe;
+ const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
+ const token_name_index = payload_token + @boolToInt(payload_is_ref);
+ const ident_name = try astgen.identAsString(token_name_index);
+ const token_name_str = tree.tokenSlice(token_name_index);
+ if (mem.eql(u8, "_", token_name_str))
+ break :s &then_scope.base;
+ payload_val_scope = .{
+ .parent = &then_scope.base,
+ .gen_zir = &then_scope,
+ .name = ident_name,
+ .inst = payload_inst,
+ .token_src = payload_token,
+ .used = .capture,
+ };
+ break :s &payload_val_scope.base;
+ } else {
+ break :s &then_scope.base;
+ }
} else if (if_full.payload_token) |payload_token| {
const ident_token = if (payload_is_ref) payload_token + 1 else payload_token;
const tag: Zir.Inst.Tag = if (payload_is_ref)
.optional_payload_unsafe_ptr
else
.optional_payload_unsafe;
+ if (mem.eql(u8, "_", tree.tokenSlice(ident_token)))
+ break :s &then_scope.base;
const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(ident_token);
payload_val_scope = .{
@@ -4896,6 +4952,7 @@ fn ifExpr(
.name = ident_name,
.inst = payload_inst,
.token_src = ident_token,
+ .used = .capture,
};
break :s &payload_val_scope.base;
} else {
@@ -4905,6 +4962,7 @@ fn ifExpr(
block_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, if_full.ast.then_expr);
+ try checkUsed(parent_gz, &then_scope.base, then_sub_scope);
// We hold off on the break instructions as well as copying the then/else
// instructions into place until we know whether to keep store_to_block_ptr
// instructions or not.
@@ -4926,21 +4984,27 @@ fn ifExpr(
.err_union_code;
const payload_inst = try else_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(error_token);
+ const error_token_str = tree.tokenSlice(error_token);
+ if (mem.eql(u8, "_", error_token_str))
+ break :s &else_scope.base;
payload_val_scope = .{
.parent = &else_scope.base,
.gen_zir = &else_scope,
.name = ident_name,
.inst = payload_inst,
.token_src = error_token,
+ .used = .capture,
};
break :s &payload_val_scope.base;
} else {
break :s &else_scope.base;
}
};
+ const e = try expr(&else_scope, sub_scope, block_scope.break_result_loc, else_node);
+ try checkUsed(parent_gz, &else_scope.base, sub_scope);
break :blk .{
.src = else_node,
- .result = try expr(&else_scope, sub_scope, block_scope.break_result_loc, else_node),
+ .result = e,
};
} else .{
.src = if_full.ast.then_expr,
@@ -4949,7 +5013,6 @@ fn ifExpr(
return finishThenElseBlock(
parent_gz,
- scope,
rl,
node,
&block_scope,
@@ -4957,8 +5020,6 @@ fn ifExpr(
&else_scope,
condbr,
cond.bool_bit,
- if_full.ast.then_expr,
- else_info.src,
then_result,
else_info.result,
block,
@@ -5071,7 +5132,7 @@ fn whileExpr(
inst: Zir.Inst.Ref,
bool_bit: Zir.Inst.Ref,
} = c: {
- if (while_full.error_token) |error_token| {
+ if (while_full.error_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const err_union = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err;
@@ -5079,7 +5140,7 @@ fn whileExpr(
.inst = err_union,
.bool_bit = try continue_scope.addUnNode(tag, err_union, node),
};
- } else if (while_full.payload_token) |payload_token| {
+ } else if (while_full.payload_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const optional = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
@@ -5103,46 +5164,35 @@ fn whileExpr(
try loop_scope.instructions.append(astgen.gpa, cond_block);
try continue_scope.setBlockBody(cond_block);
- // TODO avoid emitting the continue expr when there
- // are no jumps to it. This happens when the last statement of a while body is noreturn
- // and there are no `continue` statements.
- if (while_full.ast.cont_expr != 0) {
- _ = try expr(&loop_scope, &loop_scope.base, .{ .ty = .void_type }, while_full.ast.cont_expr);
- }
- const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
- _ = try loop_scope.addNode(repeat_tag, node);
-
- try loop_scope.setBlockBody(loop_block);
- loop_scope.break_block = loop_block;
- loop_scope.continue_block = cond_block;
- if (while_full.label_token) |label_token| {
- loop_scope.label = @as(?GenZir.Label, GenZir.Label{
- .token = label_token,
- .block_inst = loop_block,
- });
- }
-
var then_scope = parent_gz.makeSubBlock(&continue_scope.base);
defer then_scope.instructions.deinit(astgen.gpa);
var payload_val_scope: Scope.LocalVal = undefined;
const then_sub_scope = s: {
- if (while_full.error_token) |error_token| {
- const tag: Zir.Inst.Tag = if (payload_is_ref)
- .err_union_payload_unsafe_ptr
- else
- .err_union_payload_unsafe;
- const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
- const ident_name = try astgen.identAsString(error_token);
- payload_val_scope = .{
- .parent = &then_scope.base,
- .gen_zir = &then_scope,
- .name = ident_name,
- .inst = payload_inst,
- .token_src = error_token,
- };
- break :s &payload_val_scope.base;
+ if (while_full.error_token != null) {
+ if (while_full.payload_token) |payload_token| {
+ const tag: Zir.Inst.Tag = if (payload_is_ref)
+ .err_union_payload_unsafe_ptr
+ else
+ .err_union_payload_unsafe;
+ const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
+ const ident_token = if (payload_is_ref) payload_token + 1 else payload_token;
+ if (mem.eql(u8, "_", tree.tokenSlice(ident_token)))
+ break :s &then_scope.base;
+ const ident_name = try astgen.identAsString(payload_token + @boolToInt(payload_is_ref));
+ payload_val_scope = .{
+ .parent = &then_scope.base,
+ .gen_zir = &then_scope,
+ .name = ident_name,
+ .inst = payload_inst,
+ .token_src = payload_token,
+ .used = .capture,
+ };
+ break :s &payload_val_scope.base;
+ } else {
+ break :s &then_scope.base;
+ }
} else if (while_full.payload_token) |payload_token| {
const ident_token = if (payload_is_ref) payload_token + 1 else payload_token;
const tag: Zir.Inst.Tag = if (payload_is_ref)
@@ -5151,12 +5201,15 @@ fn whileExpr(
.optional_payload_unsafe;
const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(ident_token);
+ if (mem.eql(u8, "_", tree.tokenSlice(ident_token)))
+ break :s &then_scope.base;
payload_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
.name = ident_name,
.inst = payload_inst,
.token_src = ident_token,
+ .used = .capture,
};
break :s &payload_val_scope.base;
} else {
@@ -5164,8 +5217,29 @@ fn whileExpr(
}
};
+ // This code could be improved to avoid emitting the continue expr when there
+ // are no jumps to it. This happens when the last statement of a while body is noreturn
+ // and there are no `continue` statements.
+ // Tracking issue: https://github.com/ziglang/zig/issues/9185
+ if (while_full.ast.cont_expr != 0) {
+ _ = try expr(&loop_scope, then_sub_scope, .{ .ty = .void_type }, while_full.ast.cont_expr);
+ }
+ const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
+ _ = try loop_scope.addNode(repeat_tag, node);
+
+ try loop_scope.setBlockBody(loop_block);
+ loop_scope.break_block = loop_block;
+ loop_scope.continue_block = cond_block;
+ if (while_full.label_token) |label_token| {
+ loop_scope.label = @as(?GenZir.Label, GenZir.Label{
+ .token = label_token,
+ .block_inst = loop_block,
+ });
+ }
+
loop_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, loop_scope.break_result_loc, while_full.ast.then_expr);
+ try checkUsed(parent_gz, &then_scope.base, then_sub_scope);
var else_scope = parent_gz.makeSubBlock(&continue_scope.base);
defer else_scope.instructions.deinit(astgen.gpa);
@@ -5184,21 +5258,26 @@ fn whileExpr(
.err_union_code;
const payload_inst = try else_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(error_token);
+ if (mem.eql(u8, tree.tokenSlice(error_token), "_"))
+ break :s &else_scope.base;
payload_val_scope = .{
.parent = &else_scope.base,
.gen_zir = &else_scope,
.name = ident_name,
.inst = payload_inst,
.token_src = error_token,
+ .used = .capture,
};
break :s &payload_val_scope.base;
} else {
break :s &else_scope.base;
}
};
+ const e = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node);
+ try checkUsed(parent_gz, &else_scope.base, sub_scope);
break :blk .{
.src = else_node,
- .result = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node),
+ .result = e,
};
} else .{
.src = while_full.ast.then_expr,
@@ -5213,7 +5292,6 @@ fn whileExpr(
const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break";
return finishThenElseBlock(
parent_gz,
- scope,
rl,
node,
&loop_scope,
@@ -5221,8 +5299,6 @@ fn whileExpr(
&else_scope,
condbr,
cond.bool_bit,
- while_full.ast.then_expr,
- else_info.src,
then_result,
else_info.result,
loop_block,
@@ -5329,6 +5405,7 @@ fn forExpr(
.name = name_str_index,
.inst = payload_inst,
.token_src = ident,
+ .used = .capture,
};
payload_sub_scope = &payload_val_scope.base;
} else if (is_ptr) {
@@ -5351,13 +5428,15 @@ fn forExpr(
.name = index_name,
.ptr = index_ptr,
.token_src = index_token,
- .is_comptime = parent_gz.force_comptime,
+ .maybe_comptime = is_inline,
+ .used = .loop_index,
};
break :blk &index_scope.base;
};
loop_scope.break_count += 1;
const then_result = try expr(&then_scope, then_sub_scope, loop_scope.break_result_loc, for_full.ast.then_expr);
+ try checkUsed(parent_gz, &then_scope.base, then_sub_scope);
var else_scope = parent_gz.makeSubBlock(&cond_scope.base);
defer else_scope.instructions.deinit(astgen.gpa);
@@ -5386,7 +5465,6 @@ fn forExpr(
const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break";
return finishThenElseBlock(
parent_gz,
- scope,
rl,
node,
&loop_scope,
@@ -5394,8 +5472,6 @@ fn forExpr(
&else_scope,
condbr,
cond,
- for_full.ast.then_expr,
- else_info.src,
then_result,
else_info.result,
loop_block,
@@ -5598,10 +5674,12 @@ fn switchExpr(
.name = capture_name,
.inst = capture,
.token_src = payload_token,
+ .used = .capture,
};
break :blk &capture_val_scope.base;
};
const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr);
+ try checkUsed(parent_gz, &case_scope.base, sub_scope);
if (!parent_gz.refIsNoReturn(case_result)) {
block_scope.break_count += 1;
_ = try case_scope.addBreak(.@"break", switch_block, case_result);
@@ -5690,6 +5768,7 @@ fn switchExpr(
.name = capture_name,
.inst = capture,
.token_src = payload_token,
+ .used = .capture,
};
break :blk &capture_val_scope.base;
};
@@ -5723,6 +5802,7 @@ fn switchExpr(
}
const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr);
+ try checkUsed(parent_gz, &case_scope.base, sub_scope);
if (!parent_gz.refIsNoReturn(case_result)) {
block_scope.break_count += 1;
_ = try case_scope.addBreak(.@"break", switch_block, case_result);
@@ -5736,6 +5816,7 @@ fn switchExpr(
const item_node = case.ast.values[0];
const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node);
const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr);
+ try checkUsed(parent_gz, &case_scope.base, sub_scope);
if (!parent_gz.refIsNoReturn(case_result)) {
block_scope.break_count += 1;
_ = try case_scope.addBreak(.@"break", switch_block, case_result);
@@ -5934,7 +6015,7 @@ fn switchExpr(
const block_ref = parent_gz.indexToRef(switch_block);
switch (rl) {
.ref => return block_ref,
- else => return rvalue(parent_gz, scope, rl, block_ref, switch_node),
+ else => return rvalue(parent_gz, rl, block_ref, switch_node),
}
},
.break_void => {
@@ -6000,27 +6081,59 @@ fn ret(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
- const main_tokens = tree.nodes.items(.main_token);
if (gz.in_defer) return astgen.failNode(node, "cannot return from defer expression", .{});
const operand_node = node_datas[node].lhs;
- if (operand_node != 0) {
- const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node)) .{
- .ptr = try gz.addNodeExtended(.ret_ptr, node),
- } else .{
- .ty = try gz.addNodeExtended(.ret_type, node),
- };
- const operand = try expr(gz, scope, rl, operand_node);
- // TODO check operand to see if we need to generate errdefers
+ if (operand_node == 0) {
+ // Returning a void value; skip error defers.
try genDefers(gz, &astgen.fn_block.?.base, scope, .none);
- _ = try gz.addUnNode(.ret_node, operand, node);
+ _ = try gz.addUnNode(.ret_node, .void_value, node);
return Zir.Inst.Ref.unreachable_value;
}
- // Returning a void value; skip error defers.
- try genDefers(gz, &astgen.fn_block.?.base, scope, .none);
- _ = try gz.addUnNode(.ret_node, .void_value, node);
- return Zir.Inst.Ref.unreachable_value;
+
+ const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node)) .{
+ .ptr = try gz.addNodeExtended(.ret_ptr, node),
+ } else .{
+ .ty = try gz.addNodeExtended(.ret_type, node),
+ };
+ const operand = try expr(gz, scope, rl, operand_node);
+
+ switch (nodeMayEvalToError(tree, operand_node)) {
+ .never => {
+ // Returning a value that cannot be an error; skip error defers.
+ try genDefers(gz, &astgen.fn_block.?.base, scope, .none);
+ _ = try gz.addUnNode(.ret_node, operand, node);
+ return Zir.Inst.Ref.unreachable_value;
+ },
+ .always => {
+ // Value is always an error. Emit both error defers and regular defers.
+ const err_code = try gz.addUnNode(.err_union_code, operand, node);
+ try genDefers(gz, &astgen.fn_block.?.base, scope, err_code);
+ _ = try gz.addUnNode(.ret_node, operand, node);
+ return Zir.Inst.Ref.unreachable_value;
+ },
+ .maybe => {
+ // Emit conditional branch for generating errdefers.
+ const is_err = try gz.addUnNode(.is_err, operand, node);
+ const condbr = try gz.addCondBr(.condbr, node);
+
+ var then_scope = gz.makeSubBlock(scope);
+ defer then_scope.instructions.deinit(astgen.gpa);
+ const err_code = try then_scope.addUnNode(.err_union_code, operand, node);
+ try genDefers(&then_scope, &astgen.fn_block.?.base, scope, err_code);
+ _ = try then_scope.addUnNode(.ret_node, operand, node);
+
+ var else_scope = gz.makeSubBlock(scope);
+ defer else_scope.instructions.deinit(astgen.gpa);
+ try genDefers(&else_scope, &astgen.fn_block.?.base, scope, .none);
+ _ = try else_scope.addUnNode(.ret_node, operand, node);
+
+ try setCondBrPayload(condbr, is_err, &then_scope, &else_scope);
+
+ return Zir.Inst.Ref.unreachable_value;
+ },
+ }
}
fn identifier(
@@ -6043,7 +6156,7 @@ fn identifier(
}
if (simple_types.get(ident_name)) |zir_const_ref| {
- return rvalue(gz, scope, rl, zir_const_ref, ident);
+ return rvalue(gz, rl, zir_const_ref, ident);
}
if (ident_name.len >= 2) integer: {
@@ -6069,7 +6182,7 @@ fn identifier(
.bit_count = bit_count,
} },
});
- return rvalue(gz, scope, rl, result, ident);
+ return rvalue(gz, rl, result, ident);
}
}
@@ -6082,33 +6195,37 @@ fn identifier(
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
- if (hit_namespace) {
- // captures of non-locals need to be emitted as decl_val or decl_ref
- // This *might* be capturable depending on if it is comptime known
- break;
- }
+
if (local_val.name == name_str_index) {
- return rvalue(gz, scope, rl, local_val.inst, ident);
+ local_val.used = .used;
+ // Captures of non-locals need to be emitted as decl_val or decl_ref.
+ // This *might* be capturable depending on if it is comptime known.
+ if (!hit_namespace) {
+ return rvalue(gz, rl, local_val.inst, ident);
+ }
}
s = local_val.parent;
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
if (local_ptr.name == name_str_index) {
+ local_ptr.used = .used;
if (hit_namespace) {
- if (local_ptr.is_comptime)
+ if (local_ptr.maybe_comptime)
break
else
return astgen.failNodeNotes(ident, "'{s}' not accessible from inner function", .{ident_name}, &.{
try astgen.errNoteTok(local_ptr.token_src, "declared here", .{}),
// TODO add crossed function definition here note.
+ // Maybe add a note to the error about it being because of the var,
+ // maybe recommend copying it into a const variable. -SpexGuy
});
}
switch (rl) {
.ref, .none_or_ref => return local_ptr.ptr,
else => {
const loaded = try gz.addUnNode(.load, local_ptr.ptr, ident);
- return rvalue(gz, scope, rl, loaded, ident);
+ return rvalue(gz, rl, loaded, ident);
},
}
}
@@ -6142,14 +6259,13 @@ fn identifier(
.ref, .none_or_ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
else => {
const result = try gz.addStrTok(.decl_val, name_str_index, ident_token);
- return rvalue(gz, scope, rl, result, ident);
+ return rvalue(gz, rl, result, ident);
},
}
}
fn stringLiteral(
gz: *GenZir,
- scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
@@ -6165,19 +6281,17 @@ fn stringLiteral(
.len = str.len,
} },
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn multilineStringLiteral(
gz: *GenZir,
- scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
- const main_tokens = tree.nodes.items(.main_token);
const start = node_datas[node].lhs;
const end = node_datas[node].rhs;
@@ -6209,10 +6323,10 @@ fn multilineStringLiteral(
.len = @intCast(u32, string_bytes.items.len - str_index),
} },
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
-fn charLiteral(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
+fn charLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -6232,15 +6346,10 @@ fn charLiteral(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index)
},
};
const result = try gz.addInt(value);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
-fn integerLiteral(
- gz: *GenZir,
- scope: *Scope,
- rl: ResultLoc,
- node: ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
+fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -6252,7 +6361,7 @@ fn integerLiteral(
1 => .one,
else => try gz.addInt(small_int),
};
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
} else |err| switch (err) {
error.InvalidCharacter => unreachable, // Caught by the parser.
error.Overflow => {},
@@ -6283,17 +6392,11 @@ fn integerLiteral(
const limbs = big_int.limbs[0..big_int.len()];
assert(big_int.isPositive());
const result = try gz.addIntBig(limbs);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
-fn floatLiteral(
- gz: *GenZir,
- scope: *Scope,
- rl: ResultLoc,
- node: ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
+fn floatLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
- const arena = astgen.arena;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -6314,7 +6417,7 @@ fn floatLiteral(
const bigger_again: f128 = smaller_float;
if (bigger_again == float_number) {
const result = try gz.addFloat(smaller_float, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
// We need to use 128 bits. Break the float into 4 u32 values so we can
// put it into the `extra` array.
@@ -6325,7 +6428,7 @@ fn floatLiteral(
.piece2 = @truncate(u32, int_bits >> 64),
.piece3 = @truncate(u32, int_bits >> 96),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn asmExpr(
@@ -6336,7 +6439,6 @@ fn asmExpr(
full: ast.full.Asm,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
- const arena = astgen.arena;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
@@ -6378,6 +6480,33 @@ fn asmExpr(
// issues and decide how to handle outputs. Do we want this to be identifiers?
// Or maybe we want to force this to be expressions with a pointer type.
// Until that is figured out this is only hooked up for referencing Decls.
+ // TODO we have put this as an identifier lookup just so that we don't get
+ // unused vars for outputs. We need to check if this is correct in the future ^^
+ // so we just put in this simple lookup. This is a workaround.
+ {
+ var s = scope;
+ while (true) switch (s.tag) {
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+ if (local_val.name == str_index) {
+ local_val.used = .used;
+ break;
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == str_index) {
+ local_ptr.used = .used;
+ break;
+ }
+ s = local_ptr.parent;
+ },
+ .gen_zir => s = s.cast(GenZir).?.parent,
+ .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+ .namespace, .top => break,
+ };
+ }
const operand = try gz.addStrTok(.decl_ref, str_index, ident_token);
outputs[i] = .{
.name = name,
@@ -6398,7 +6527,6 @@ fn asmExpr(
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
const constraint = (try astgen.strLitAsString(constraint_token)).index;
- const has_arrow = token_tags[symbolic_name + 4] == .arrow;
const operand = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[input_node].lhs);
inputs[i] = .{
.name = name,
@@ -6443,7 +6571,7 @@ fn asmExpr(
.inputs = inputs,
.clobbers = clobbers_buffer[0..clobber_i],
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn as(
@@ -6458,7 +6586,7 @@ fn as(
switch (rl) {
.none, .none_or_ref, .discard, .ref, .ty => {
const result = try expr(gz, scope, .{ .ty = dest_type }, rhs);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.ptr, .inferred_ptr => |result_ptr| {
return asRlPtr(gz, scope, rl, result_ptr, rhs, dest_type);
@@ -6480,18 +6608,18 @@ fn unionInit(
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
switch (rl) {
.none, .none_or_ref, .discard, .ref, .ty, .inferred_ptr => {
- const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{
+ _ = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{
.container_type = union_type,
.field_name = field_name,
});
const result = try expr(gz, scope, .{ .ty = union_type }, params[2]);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.ptr => |result_ptr| {
- return unionInitRlPtr(gz, scope, rl, node, result_ptr, params[2], union_type, field_name);
+ return unionInitRlPtr(gz, scope, node, result_ptr, params[2], union_type, field_name);
},
.block_ptr => |block_scope| {
- return unionInitRlPtr(gz, scope, rl, node, block_scope.rl_ptr, params[2], union_type, field_name);
+ return unionInitRlPtr(gz, scope, node, block_scope.rl_ptr, params[2], union_type, field_name);
},
}
}
@@ -6499,7 +6627,6 @@ fn unionInit(
fn unionInitRlPtr(
parent_gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
result_ptr: Zir.Inst.Ref,
expr_node: ast.Node.Index,
@@ -6547,7 +6674,7 @@ fn asRlPtr(
parent_zir.appendAssumeCapacity(src_inst);
}
const casted_result = try parent_gz.addBin(.as, dest_type, result);
- return rvalue(parent_gz, scope, rl, casted_result, operand_node);
+ return rvalue(parent_gz, rl, casted_result, operand_node);
} else {
try parent_zir.appendSlice(astgen.gpa, as_scope.instructions.items);
return result;
@@ -6571,16 +6698,16 @@ fn bitCast(
.lhs = dest_type,
.rhs = operand,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.ref => {
return astgen.failNode(node, "cannot take address of `@bitCast` result", .{});
},
.ptr, .inferred_ptr => |result_ptr| {
- return bitCastRlPtr(gz, scope, rl, node, dest_type, result_ptr, rhs);
+ return bitCastRlPtr(gz, scope, node, dest_type, result_ptr, rhs);
},
.block_ptr => |block| {
- return bitCastRlPtr(gz, scope, rl, node, dest_type, block.rl_ptr, rhs);
+ return bitCastRlPtr(gz, scope, node, dest_type, block.rl_ptr, rhs);
},
}
}
@@ -6588,7 +6715,6 @@ fn bitCast(
fn bitCastRlPtr(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: ast.Node.Index,
dest_type: Zir.Inst.Ref,
result_ptr: Zir.Inst.Ref,
@@ -6613,7 +6739,7 @@ fn typeOf(
}
if (params.len == 1) {
const result = try gz.addUnNode(.typeof, try expr(gz, scope, .none, params[0]), node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
const arena = gz.astgen.arena;
var items = try arena.alloc(Zir.Inst.Ref, params.len);
@@ -6622,7 +6748,7 @@ fn typeOf(
}
const result = try gz.addExtendedMultiOp(.typeof_peer, node, items);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn builtinCall(
@@ -6662,7 +6788,6 @@ fn builtinCall(
switch (info.tag) {
.import => {
const node_tags = tree.nodes.items(.tag);
- const node_datas = tree.nodes.items(.data);
const operand_node = params[0];
if (node_tags[operand_node] != .string_literal) {
@@ -6673,7 +6798,7 @@ fn builtinCall(
const str = try astgen.strLitAsString(str_lit_token);
try astgen.imports.put(astgen.gpa, str.index, {});
const result = try gz.addStrTok(.import, str.index, str_lit_token);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.compile_log => {
const arg_refs = try astgen.gpa.alloc(Zir.Inst.Ref, params.len);
@@ -6682,7 +6807,7 @@ fn builtinCall(
for (params) |param, i| arg_refs[i] = try expr(gz, scope, .none, param);
const result = try gz.addExtendedMultiOp(.compile_log, node, arg_refs);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.field => {
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
@@ -6696,7 +6821,7 @@ fn builtinCall(
.lhs = try expr(gz, scope, .none, params[0]),
.field_name = field_name,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.as => return as( gz, scope, rl, node, params[0], params[1]),
.bit_cast => return bitCast( gz, scope, rl, node, params[0], params[1]),
@@ -6715,9 +6840,32 @@ fn builtinCall(
.identifier => {
const ident_token = main_tokens[params[0]];
decl_name = try astgen.identAsString(ident_token);
- // TODO look for local variables in scope matching `decl_name` and emit a compile
- // error. Only top-level declarations can be exported. Until this is done, the
- // compile error will end up being "use of undeclared identifier" in Sema.
+ {
+ var s = scope;
+ while (true) switch (s.tag) {
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+ if (local_val.name == decl_name) {
+ local_val.used = .used;
+ break;
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == decl_name) {
+ if (!local_ptr.maybe_comptime)
+ return astgen.failNode(params[0], "unable to export runtime-known value", .{});
+ local_ptr.used = .used;
+ break;
+ }
+ s = local_ptr.parent;
+ },
+ .gen_zir => s = s.cast(GenZir).?.parent,
+ .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+ .namespace, .top => break,
+ };
+ }
},
.field_access => {
const namespace_node = node_datas[params[0]].lhs;
@@ -6727,7 +6875,7 @@ fn builtinCall(
decl_name = try astgen.identAsString(field_ident);
},
else => return astgen.failNode(
- params[0], "the first @export parameter must be an identifier", .{},
+ params[0], "symbol to export must identify a declaration", .{},
),
}
const options = try comptimeExpr(gz, scope, .{ .ty = .export_options_type }, params[1]);
@@ -6736,7 +6884,7 @@ fn builtinCall(
.decl_name = decl_name,
.options = options,
});
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
},
.@"extern" => {
const type_inst = try typeExpr(gz, scope, params[0]);
@@ -6746,18 +6894,18 @@ fn builtinCall(
.lhs = type_inst,
.rhs = options,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
- .breakpoint => return simpleNoOpVoid(gz, scope, rl, node, .breakpoint),
- .fence => return simpleNoOpVoid(gz, scope, rl, node, .fence),
+ .breakpoint => return simpleNoOpVoid(gz, rl, node, .breakpoint),
+ .fence => return simpleNoOpVoid(gz, rl, node, .fence),
- .This => return rvalue(gz, scope, rl, try gz.addNodeExtended(.this, node), node),
- .return_address => return rvalue(gz, scope, rl, try gz.addNodeExtended(.ret_addr, node), node),
- .src => return rvalue(gz, scope, rl, try gz.addNodeExtended(.builtin_src, node), node),
- .error_return_trace => return rvalue(gz, scope, rl, try gz.addNodeExtended(.error_return_trace, node), node),
- .frame => return rvalue(gz, scope, rl, try gz.addNodeExtended(.frame, node), node),
- .frame_address => return rvalue(gz, scope, rl, try gz.addNodeExtended(.frame_address, node), node),
+ .This => return rvalue(gz, rl, try gz.addNodeExtended(.this, node), node),
+ .return_address => return rvalue(gz, rl, try gz.addNodeExtended(.ret_addr, node), node),
+ .src => return rvalue(gz, rl, try gz.addNodeExtended(.builtin_src, node), node),
+ .error_return_trace => return rvalue(gz, rl, try gz.addNodeExtended(.error_return_trace, node), node),
+ .frame => return rvalue(gz, rl, try gz.addNodeExtended(.frame, node), node),
+ .frame_address => return rvalue(gz, rl, try gz.addNodeExtended(.frame_address, node), node),
.type_info => return simpleUnOpType(gz, scope, rl, node, params[0], .type_info),
.size_of => return simpleUnOpType(gz, scope, rl, node, params[0], .size_of),
@@ -6813,7 +6961,7 @@ fn builtinCall(
.lhs = dest_align,
.rhs = rhs,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.has_decl => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_decl),
@@ -6849,7 +6997,7 @@ fn builtinCall(
.node = gz.nodeIndexToRelative(node),
.operand = operand,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.wasm_memory_grow => {
const index_arg = try expr(gz, scope, .{ .ty = .u32_type }, params[0]);
@@ -6859,7 +7007,7 @@ fn builtinCall(
.lhs = index_arg,
.rhs = delta_arg,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.c_define => {
const name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[0]);
@@ -6869,7 +7017,7 @@ fn builtinCall(
.lhs = name,
.rhs = value,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.splat => {
@@ -6879,7 +7027,7 @@ fn builtinCall(
.lhs = len,
.rhs = scalar,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.reduce => {
const op = try expr(gz, scope, .{ .ty = .reduce_op_type }, params[0]);
@@ -6888,7 +7036,7 @@ fn builtinCall(
.lhs = op,
.rhs = scalar,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.add_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .add_with_overflow),
@@ -6915,7 +7063,7 @@ fn builtinCall(
.rhs = rhs,
.ptr = ptr,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.atomic_load => {
@@ -6935,7 +7083,7 @@ fn builtinCall(
.lhs = ptr,
.rhs = ordering,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.atomic_rmw => {
const int_type = try typeExpr(gz, scope, params[0]);
@@ -6958,7 +7106,7 @@ fn builtinCall(
.operand = operand,
.ordering = ordering,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.atomic_store => {
const int_type = try typeExpr(gz, scope, params[0]);
@@ -6979,7 +7127,7 @@ fn builtinCall(
.operand = operand,
.ordering = ordering,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.mul_add => {
const float_type = try typeExpr(gz, scope, params[0]);
@@ -6991,7 +7139,7 @@ fn builtinCall(
.mulend2 = mulend2,
.addend = addend,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.call => {
const options = try comptimeExpr(gz, scope, .{ .ty = .call_options_type }, params[0]);
@@ -7002,7 +7150,7 @@ fn builtinCall(
.callee = callee,
.args = args,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.field_parent_ptr => {
const parent_type = try typeExpr(gz, scope, params[0]);
@@ -7013,7 +7161,7 @@ fn builtinCall(
.field_name = field_name,
.field_ptr = try expr(gz, scope, .{ .ty = field_ptr_type }, params[2]),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.memcpy => {
const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
@@ -7021,7 +7169,7 @@ fn builtinCall(
.source = try expr(gz, scope, .{ .ty = .manyptr_const_u8_type }, params[1]),
.byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.memset => {
const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
@@ -7029,7 +7177,7 @@ fn builtinCall(
.byte = try expr(gz, scope, .{ .ty = .u8_type }, params[1]),
.byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.shuffle => {
const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{
@@ -7038,7 +7186,7 @@ fn builtinCall(
.b = try expr(gz, scope, .none, params[2]),
.mask = try comptimeExpr(gz, scope, .none, params[3]),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.async_call => {
const result = try gz.addPlNode(.builtin_async_call, node, Zir.Inst.AsyncCall{
@@ -7047,14 +7195,14 @@ fn builtinCall(
.fn_ptr = try expr(gz, scope, .none, params[2]),
.args = try expr(gz, scope, .none, params[3]),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
.Vector => {
const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{
.lhs = try comptimeExpr(gz, scope, .{.ty = .u32_type}, params[0]),
.rhs = try typeExpr(gz, scope, params[1]),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
},
}
@@ -7063,13 +7211,12 @@ fn builtinCall(
fn simpleNoOpVoid(
gz: *GenZir,
- scope: *Scope,
rl: ResultLoc,
node: ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = try gz.addNode(tag, node);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
}
fn hasDeclOrField(
@@ -7087,7 +7234,7 @@ fn hasDeclOrField(
.lhs = container_type,
.rhs = name,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn typeCast(
@@ -7103,7 +7250,7 @@ fn typeCast(
.lhs = try typeExpr(gz, scope, lhs_node),
.rhs = try expr(gz, scope, .none, rhs_node),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn simpleUnOpType(
@@ -7116,7 +7263,7 @@ fn simpleUnOpType(
) InnerError!Zir.Inst.Ref {
const operand = try typeExpr(gz, scope, operand_node);
const result = try gz.addUnNode(tag, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn simpleUnOp(
@@ -7130,7 +7277,7 @@ fn simpleUnOp(
) InnerError!Zir.Inst.Ref {
const operand = try expr(gz, scope, operand_rl, operand_node);
const result = try gz.addUnNode(tag, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn cmpxchg(
@@ -7160,7 +7307,7 @@ fn cmpxchg(
.fail_order = try expr(gz, scope, .{ .ty = .atomic_ordering_type }, params[5]),
// zig fmt: on
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn bitBuiltin(
@@ -7175,7 +7322,7 @@ fn bitBuiltin(
const int_type = try typeExpr(gz, scope, int_type_node);
const operand = try expr(gz, scope, .{ .ty = int_type }, operand_node);
const result = try gz.addUnNode(tag, operand, node);
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn divBuiltin(
@@ -7191,7 +7338,7 @@ fn divBuiltin(
.lhs = try expr(gz, scope, .none, lhs_node),
.rhs = try expr(gz, scope, .none, rhs_node),
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn simpleCBuiltin(
@@ -7207,7 +7354,7 @@ fn simpleCBuiltin(
.node = gz.nodeIndexToRelative(node),
.operand = operand,
});
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
}
fn offsetOf(
@@ -7225,7 +7372,7 @@ fn offsetOf(
.lhs = type_inst,
.rhs = field_name,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn shiftOp(
@@ -7244,7 +7391,7 @@ fn shiftOp(
.lhs = lhs,
.rhs = rhs,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn cImport(
@@ -7269,7 +7416,7 @@ fn cImport(
try block_scope.setBlockBody(block_inst);
try gz.instructions.append(gpa, block_inst);
- return rvalue(gz, scope, rl, .void_value, node);
+ return rvalue(gz, rl, .void_value, node);
}
fn overflowArithmetic(
@@ -7299,7 +7446,7 @@ fn overflowArithmetic(
.rhs = rhs,
.ptr = ptr,
});
- return rvalue(gz, scope, rl, result, node);
+ return rvalue(gz, rl, result, node);
}
fn callExpr(
@@ -7351,7 +7498,7 @@ fn callExpr(
};
break :res try gz.addCall(tag, lhs, args, node);
};
- return rvalue(gz, scope, rl, result, node); // TODO function call with result location
+ return rvalue(gz, rl, result, node); // TODO function call with result location
}
pub const simple_types = std.ComptimeStringMap(Zir.Inst.Ref, .{
@@ -7608,13 +7755,225 @@ fn nodeMayNeedMemoryLocation(tree: *const ast.Tree, start_node: ast.Node.Index)
}
}
+fn nodeMayEvalToError(tree: *const ast.Tree, start_node: ast.Node.Index) enum { never, always, maybe } {
+ const node_tags = tree.nodes.items(.tag);
+ const node_datas = tree.nodes.items(.data);
+ const main_tokens = tree.nodes.items(.main_token);
+ const token_tags = tree.tokens.items(.tag);
+
+ var node = start_node;
+ while (true) {
+ switch (node_tags[node]) {
+ .root,
+ .@"usingnamespace",
+ .test_decl,
+ .switch_case,
+ .switch_case_one,
+ .container_field_init,
+ .container_field_align,
+ .container_field,
+ .asm_output,
+ .asm_input,
+ => unreachable,
+
+ .error_value => return .always,
+
+ .@"asm",
+ .asm_simple,
+ .identifier,
+ .field_access,
+ .deref,
+ .array_access,
+ .while_simple,
+ .while_cont,
+ .for_simple,
+ .if_simple,
+ .@"while",
+ .@"if",
+ .@"for",
+ .@"switch",
+ .switch_comma,
+ .call_one,
+ .call_one_comma,
+ .async_call_one,
+ .async_call_one_comma,
+ .call,
+ .call_comma,
+ .async_call,
+ .async_call_comma,
+ => return .maybe,
+
+ .@"return",
+ .@"break",
+ .@"continue",
+ .bit_not,
+ .bool_not,
+ .global_var_decl,
+ .local_var_decl,
+ .simple_var_decl,
+ .aligned_var_decl,
+ .@"defer",
+ .@"errdefer",
+ .address_of,
+ .optional_type,
+ .negation,
+ .negation_wrap,
+ .@"resume",
+ .array_type,
+ .array_type_sentinel,
+ .ptr_type_aligned,
+ .ptr_type_sentinel,
+ .ptr_type,
+ .ptr_type_bit_range,
+ .@"suspend",
+ .@"anytype",
+ .fn_proto_simple,
+ .fn_proto_multi,
+ .fn_proto_one,
+ .fn_proto,
+ .fn_decl,
+ .anyframe_type,
+ .anyframe_literal,
+ .integer_literal,
+ .float_literal,
+ .enum_literal,
+ .string_literal,
+ .multiline_string_literal,
+ .char_literal,
+ .true_literal,
+ .false_literal,
+ .null_literal,
+ .undefined_literal,
+ .unreachable_literal,
+ .error_set_decl,
+ .container_decl,
+ .container_decl_trailing,
+ .container_decl_two,
+ .container_decl_two_trailing,
+ .container_decl_arg,
+ .container_decl_arg_trailing,
+ .tagged_union,
+ .tagged_union_trailing,
+ .tagged_union_two,
+ .tagged_union_two_trailing,
+ .tagged_union_enum_tag,
+ .tagged_union_enum_tag_trailing,
+ .add,
+ .add_wrap,
+ .array_cat,
+ .array_mult,
+ .assign,
+ .assign_bit_and,
+ .assign_bit_or,
+ .assign_bit_shift_left,
+ .assign_bit_shift_right,
+ .assign_bit_xor,
+ .assign_div,
+ .assign_sub,
+ .assign_sub_wrap,
+ .assign_mod,
+ .assign_add,
+ .assign_add_wrap,
+ .assign_mul,
+ .assign_mul_wrap,
+ .bang_equal,
+ .bit_and,
+ .bit_or,
+ .bit_shift_left,
+ .bit_shift_right,
+ .bit_xor,
+ .bool_and,
+ .bool_or,
+ .div,
+ .equal_equal,
+ .error_union,
+ .greater_or_equal,
+ .greater_than,
+ .less_or_equal,
+ .less_than,
+ .merge_error_sets,
+ .mod,
+ .mul,
+ .mul_wrap,
+ .switch_range,
+ .sub,
+ .sub_wrap,
+ .slice,
+ .slice_open,
+ .slice_sentinel,
+ .array_init_one,
+ .array_init_one_comma,
+ .array_init_dot_two,
+ .array_init_dot_two_comma,
+ .array_init_dot,
+ .array_init_dot_comma,
+ .array_init,
+ .array_init_comma,
+ .struct_init_one,
+ .struct_init_one_comma,
+ .struct_init_dot_two,
+ .struct_init_dot_two_comma,
+ .struct_init_dot,
+ .struct_init_dot_comma,
+ .struct_init,
+ .struct_init_comma,
+ => return .never,
+
+ // Forward the question to the LHS sub-expression.
+ .grouped_expression,
+ .@"try",
+ .@"await",
+ .@"comptime",
+ .@"nosuspend",
+ .unwrap_optional,
+ => node = node_datas[node].lhs,
+
+ // Forward the question to the RHS sub-expression.
+ .@"catch",
+ .@"orelse",
+ => node = node_datas[node].rhs,
+
+ .block_two,
+ .block_two_semicolon,
+ .block,
+ .block_semicolon,
+ => {
+ const lbrace = main_tokens[node];
+ if (token_tags[lbrace - 1] == .colon) {
+ // Labeled blocks may need a memory location to forward
+ // to their break statements.
+ return .maybe;
+ } else {
+ return .never;
+ }
+ },
+
+ .builtin_call,
+ .builtin_call_comma,
+ .builtin_call_two,
+ .builtin_call_two_comma,
+ => {
+ const builtin_token = main_tokens[node];
+ const builtin_name = tree.tokenSlice(builtin_token);
+ // If the builtin is an invalid name, we don't cause an error here; instead
+ // let it pass, and the error will be "invalid builtin function" later.
+ const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe;
+ if (builtin_info.tag == .err_set_cast) {
+ return .always;
+ } else {
+ return .never;
+ }
+ },
+ }
+ }
+}
+
/// Applies `rl` semantics to `inst`. Expressions which do not do their own handling of
/// result locations must call this function on their result.
/// As an example, if the `ResultLoc` is `ptr`, it will write the result to the pointer.
/// If the `ResultLoc` is `ty`, it will coerce the result to the type.
fn rvalue(
gz: *GenZir,
- scope: *Scope,
rl: ResultLoc,
result: Zir.Inst.Ref,
src_node: ast.Node.Index,
@@ -7762,7 +8121,6 @@ fn parseStrLit(
bytes: []const u8,
offset: u32,
) InnerError!void {
- const tree = astgen.tree;
const raw_string = bytes[offset..];
var buf_managed = buf.toManaged(astgen.gpa);
const result = std.zig.string_literal.parseAppend(&buf_managed, raw_string);
@@ -8065,6 +8423,15 @@ const Scope = struct {
top,
};
+ // either .used or the type of the var/constant
+ const Used = enum {
+ fn_param,
+ constant,
+ variable,
+ loop_index,
+ capture,
+ used,
+ };
/// This is always a `const` local and importantly the `inst` is a value type, not a pointer.
/// This structure lives as long as the AST generation of the Block
/// node that contains the variable.
@@ -8079,6 +8446,8 @@ const Scope = struct {
token_src: ast.TokenIndex,
/// String table index.
name: u32,
+ /// has this variable been referenced?
+ used: Used,
};
/// This could be a `const` or `var` local. It has a pointer instead of a value.
@@ -8095,7 +8464,10 @@ const Scope = struct {
token_src: ast.TokenIndex,
/// String table index.
name: u32,
- is_comptime: bool,
+ /// true means we find out during Sema whether the value is comptime. false means it is already known at AstGen the value is runtime-known.
+ maybe_comptime: bool,
+ /// has this variable been referenced?
+ used: Used,
};
const Defer = struct {
@@ -8205,13 +8577,13 @@ const GenZir = struct {
fn calcLine(gz: GenZir, node: ast.Node.Index) u32 {
const astgen = gz.astgen;
const tree = astgen.tree;
- const node_tags = tree.nodes.items(.tag);
+ const source = tree.source;
const token_starts = tree.tokens.items(.start);
- const decl_start = token_starts[tree.firstToken(gz.decl_node_index)];
const node_start = token_starts[tree.firstToken(node)];
- const source = tree.source[decl_start..node_start];
- const loc = std.zig.findLineColumn(source, source.len);
- return @intCast(u32, gz.decl_line + loc.line);
+
+ astgen.advanceSourceCursor(source, node_start);
+
+ return @intCast(u32, gz.decl_line + astgen.source_line);
}
fn tokSrcLoc(gz: GenZir, token_index: ast.TokenIndex) LazySrcLoc {
@@ -8355,20 +8727,21 @@ const GenZir = struct {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
- const decl_start = token_starts[tree.firstToken(gz.decl_node_index)];
const fn_decl = args.src_node;
assert(node_tags[fn_decl] == .fn_decl or node_tags[fn_decl] == .test_decl);
const block = node_datas[fn_decl].rhs;
const lbrace_start = token_starts[tree.firstToken(block)];
const rbrace_start = token_starts[tree.lastToken(block)];
- const lbrace_source = tree.source[decl_start..lbrace_start];
- const lbrace_loc = std.zig.findLineColumn(lbrace_source, lbrace_source.len);
- const rbrace_source = tree.source[lbrace_start..rbrace_start];
- const rbrace_loc = std.zig.findLineColumn(rbrace_source, rbrace_source.len);
- const lbrace_line = @intCast(u32, lbrace_loc.line);
- const rbrace_line = lbrace_line + @intCast(u32, rbrace_loc.line);
- const columns = @intCast(u32, lbrace_loc.column) |
- (@intCast(u32, rbrace_loc.column) << 16);
+
+ astgen.advanceSourceCursor(tree.source, lbrace_start);
+ const lbrace_line = @intCast(u32, astgen.source_line);
+ const lbrace_column = @intCast(u32, astgen.source_column);
+
+ astgen.advanceSourceCursor(tree.source, rbrace_start);
+ const rbrace_line = @intCast(u32, astgen.source_line);
+ const rbrace_column = @intCast(u32, astgen.source_column);
+
+ const columns = lbrace_column | (rbrace_column << 16);
src_locs_buffer[0] = lbrace_line;
src_locs_buffer[1] = rbrace_line;
src_locs_buffer[2] = columns;
@@ -9184,3 +9557,20 @@ fn declareNewName(
}
}
}
+
+fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void {
+ var i = astgen.source_offset;
+ var line = astgen.source_line;
+ var column = astgen.source_column;
+ while (i < end) : (i += 1) {
+ if (source[i] == '\n') {
+ line += 1;
+ column = 0;
+ } else {
+ column += 1;
+ }
+ }
+ astgen.source_offset = i;
+ astgen.source_line = line;
+ astgen.source_column = column;
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index dc5fcbf361..f202034242 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -325,7 +325,6 @@ pub const AllErrors = struct {
},
pub fn renderToStdErr(msg: Message, ttyconf: std.debug.TTY.Config) void {
- const stderr_mutex = std.debug.getStderrMutex();
const held = std.debug.getStderrMutex().acquire();
defer held.release();
const stderr = std.io.getStdErr();
@@ -343,6 +342,7 @@ pub const AllErrors = struct {
const stderr = stderr_file.writer();
switch (msg) {
.src => |src| {
+ try stderr.writeByteNTimes(' ', indent);
ttyconf.setColor(stderr, .Bold);
try stderr.print("{s}:{d}:{d}: ", .{
src.src_path,
@@ -350,7 +350,6 @@ pub const AllErrors = struct {
src.column + 1,
});
ttyconf.setColor(stderr, color);
- try stderr.writeByteNTimes(' ', indent);
try stderr.writeAll(kind);
ttyconf.setColor(stderr, .Reset);
ttyconf.setColor(stderr, .Bold);
@@ -524,6 +523,7 @@ pub const AllErrors = struct {
errors: *std.ArrayList(Message),
msg: []const u8,
) !void {
+ _ = arena;
try errors.append(.{ .plain = .{ .msg = msg } });
}
@@ -612,6 +612,7 @@ pub const InitOptions = struct {
output_mode: std.builtin.OutputMode,
thread_pool: *ThreadPool,
dynamic_linker: ?[]const u8 = null,
+ sysroot: ?[]const u8 = null,
/// `null` means to not emit a binary file.
emit_bin: ?EmitLoc,
/// `null` means to not emit a C header file.
@@ -731,6 +732,7 @@ fn addPackageTableToCacheHash(
hash: *Cache.HashHelper,
arena: *std.heap.ArenaAllocator,
pkg_table: Package.Table,
+ seen_table: *std.AutoHashMap(*Package, void),
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.os.GetCwdError)!void {
const allocator = &arena.allocator;
@@ -755,6 +757,8 @@ fn addPackageTableToCacheHash(
}.lessThan);
for (packages) |pkg| {
+ if ((try seen_table.getOrPut(pkg.value)).found_existing) continue;
+
// Finally insert the package name and path to the cache hash.
hash.addBytes(pkg.key);
switch (hash_type) {
@@ -770,7 +774,7 @@ fn addPackageTableToCacheHash(
},
}
// Recurse to handle the package's dependencies
- try addPackageTableToCacheHash(hash, arena, pkg.value.table, hash_type);
+ try addPackageTableToCacheHash(hash, arena, pkg.value.table, seen_table, hash_type);
}
}
@@ -876,25 +880,32 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
break :blk false;
};
- const DarwinOptions = struct {
- syslibroot: ?[]const u8 = null,
- system_linker_hack: bool = false,
+ const darwin_can_use_system_linker_and_sdk =
+ // comptime conditions
+ ((build_options.have_llvm and comptime std.Target.current.isDarwin()) and
+ // runtime conditions
+ (use_lld and std.builtin.os.tag == .macos and options.target.isDarwin()));
+
+ const darwin_system_linker_hack = blk: {
+ if (darwin_can_use_system_linker_and_sdk) {
+ break :blk std.os.getenv("ZIG_SYSTEM_LINKER_HACK") != null;
+ } else {
+ break :blk false;
+ }
};
- const darwin_options: DarwinOptions = if (build_options.have_llvm and comptime std.Target.current.isDarwin()) outer: {
- const opts: DarwinOptions = if (use_lld and std.builtin.os.tag == .macos and options.target.isDarwin()) inner: {
+ const sysroot = blk: {
+ if (options.sysroot) |sysroot| {
+ break :blk sysroot;
+ } else if (darwin_can_use_system_linker_and_sdk) {
// TODO Revisit this targeting versions lower than macOS 11 when LLVM 12 is out.
// See https://github.com/ziglang/zig/issues/6996
const at_least_big_sur = options.target.os.getVersionRange().semver.min.major >= 11;
- const syslibroot = if (at_least_big_sur) try std.zig.system.getSDKPath(arena) else null;
- const system_linker_hack = std.os.getenv("ZIG_SYSTEM_LINKER_HACK") != null;
- break :inner .{
- .syslibroot = syslibroot,
- .system_linker_hack = system_linker_hack,
- };
- } else .{};
- break :outer opts;
- } else .{};
+ break :blk if (at_least_big_sur) try std.zig.system.getSDKPath(arena) else null;
+ } else {
+ break :blk null;
+ }
+ };
const lto = blk: {
if (options.want_lto) |explicit| {
@@ -905,7 +916,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
break :blk false;
} else if (options.c_source_files.len == 0) {
break :blk false;
- } else if (darwin_options.system_linker_hack) {
+ } else if (darwin_system_linker_hack) {
break :blk false;
} else switch (options.output_mode) {
.Lib, .Obj => break :blk false,
@@ -1116,7 +1127,8 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
{
var local_arena = std.heap.ArenaAllocator.init(gpa);
defer local_arena.deinit();
- try addPackageTableToCacheHash(&hash, &local_arena, root_pkg.table, .path_bytes);
+ var seen_table = std.AutoHashMap(*Package, void).init(&local_arena.allocator);
+ try addPackageTableToCacheHash(&hash, &local_arena, root_pkg.table, &seen_table, .path_bytes);
}
hash.add(valgrind);
hash.add(single_threaded);
@@ -1137,36 +1149,32 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
artifact_sub_dir,
};
- // If we rely on stage1, we must not redundantly add these packages.
- const use_stage1 = build_options.is_stage1 and use_llvm;
- if (!use_stage1) {
- const builtin_pkg = try Package.createWithDir(
- gpa,
- zig_cache_artifact_directory,
- null,
- "builtin.zig",
- );
- errdefer builtin_pkg.destroy(gpa);
+ const builtin_pkg = try Package.createWithDir(
+ gpa,
+ zig_cache_artifact_directory,
+ null,
+ "builtin.zig",
+ );
+ errdefer builtin_pkg.destroy(gpa);
- const std_pkg = try Package.createWithDir(
- gpa,
- options.zig_lib_directory,
- "std",
- "std.zig",
- );
- errdefer std_pkg.destroy(gpa);
+ const std_pkg = try Package.createWithDir(
+ gpa,
+ options.zig_lib_directory,
+ "std",
+ "std.zig",
+ );
+ errdefer std_pkg.destroy(gpa);
- try root_pkg.addAndAdopt(gpa, "builtin", builtin_pkg);
- try root_pkg.add(gpa, "root", root_pkg);
- try root_pkg.addAndAdopt(gpa, "std", std_pkg);
+ try root_pkg.addAndAdopt(gpa, "builtin", builtin_pkg);
+ try root_pkg.add(gpa, "root", root_pkg);
+ try root_pkg.addAndAdopt(gpa, "std", std_pkg);
- try std_pkg.add(gpa, "builtin", builtin_pkg);
- try std_pkg.add(gpa, "root", root_pkg);
- try std_pkg.add(gpa, "std", std_pkg);
+ try std_pkg.add(gpa, "builtin", builtin_pkg);
+ try std_pkg.add(gpa, "root", root_pkg);
+ try std_pkg.add(gpa, "std", std_pkg);
- try builtin_pkg.add(gpa, "std", std_pkg);
- try builtin_pkg.add(gpa, "builtin", builtin_pkg);
- }
+ try builtin_pkg.add(gpa, "std", std_pkg);
+ try builtin_pkg.add(gpa, "builtin", builtin_pkg);
// Pre-open the directory handles for cached ZIR code so that it does not need
// to redundantly happen for each AstGen operation.
@@ -1281,13 +1289,14 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.module = module,
.target = options.target,
.dynamic_linker = options.dynamic_linker,
+ .sysroot = sysroot,
.output_mode = options.output_mode,
.link_mode = link_mode,
.object_format = ofmt,
.optimize_mode = options.optimize_mode,
.use_lld = use_lld,
.use_llvm = use_llvm,
- .system_linker_hack = darwin_options.system_linker_hack,
+ .system_linker_hack = darwin_system_linker_hack,
.link_libc = link_libc,
.link_libcpp = link_libcpp,
.link_libunwind = link_libunwind,
@@ -1295,7 +1304,6 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.frameworks = options.frameworks,
.framework_dirs = options.framework_dirs,
.system_libs = system_libs,
- .syslibroot = darwin_options.syslibroot,
.wasi_emulated_libs = options.wasi_emulated_libs,
.lib_dirs = options.lib_dirs,
.rpath_list = options.rpath_list,
@@ -1625,30 +1633,39 @@ pub fn update(self: *Compilation) !void {
// Add a Job for each C object.
try self.c_object_work_queue.ensureUnusedCapacity(self.c_object_table.count());
for (self.c_object_table.keys()) |key| {
- assert(@ptrToInt(key) != 0xaaaa_aaaa_aaaa_aaaa);
self.c_object_work_queue.writeItemAssumeCapacity(key);
}
const use_stage1 = build_options.omit_stage2 or
(build_options.is_stage1 and self.bin_file.options.use_llvm);
- if (!use_stage1) {
- if (self.bin_file.options.module) |module| {
- module.compile_log_text.shrinkAndFree(module.gpa, 0);
- module.generation += 1;
-
- // Make sure std.zig is inside the import_table. We unconditionally need
- // it for start.zig.
- const std_pkg = module.root_pkg.table.get("std").?;
- _ = try module.importPkg(module.root_pkg, std_pkg);
-
- // Put a work item in for every known source file to detect if
- // it changed, and, if so, re-compute ZIR and then queue the job
- // to update it.
- try self.astgen_work_queue.ensureUnusedCapacity(module.import_table.count());
- for (module.import_table.values()) |value| {
- self.astgen_work_queue.writeItemAssumeCapacity(value);
- }
+ if (self.bin_file.options.module) |module| {
+ module.compile_log_text.shrinkAndFree(module.gpa, 0);
+ module.generation += 1;
+
+ // Make sure std.zig is inside the import_table. We unconditionally need
+ // it for start.zig.
+ const std_pkg = module.root_pkg.table.get("std").?;
+ _ = try module.importPkg(std_pkg);
+
+ // Normally we rely on importing std to in turn import the root source file
+ // in the start code, but when using the stage1 backend that won't happen,
+ // so in order to run AstGen on the root source file we put it into the
+ // import_table here.
+ if (use_stage1) {
+ _ = try module.importPkg(module.root_pkg);
+ }
+
+ // Put a work item in for every known source file to detect if
+ // it changed, and, if so, re-compute ZIR and then queue the job
+ // to update it.
+ // We still want AstGen work items for stage1 so that we expose compile errors
+ // that are implemented in stage2 but not stage1.
+ try self.astgen_work_queue.ensureUnusedCapacity(module.import_table.count());
+ for (module.import_table.values()) |value| {
+ self.astgen_work_queue.writeItemAssumeCapacity(value);
+ }
+ if (!use_stage1) {
try self.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
}
}
@@ -1915,7 +1932,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
// (at least for now) single-threaded main work queue. However, C object compilation
// only needs to be finished by the end of this function.
- var zir_prog_node = main_progress_node.start("AstGen", self.astgen_work_queue.count);
+ var zir_prog_node = main_progress_node.start("AST Lowering", self.astgen_work_queue.count);
defer zir_prog_node.end();
var c_obj_prog_node = main_progress_node.start("Compile C Objects", self.c_source_files.len);
@@ -1936,7 +1953,6 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
}
while (self.c_object_work_queue.readItem()) |c_object| {
- assert(@ptrToInt(c_object) != 0xaaaa_aaaa_aaaa_aaaa);
self.work_queue_wait_group.start();
try self.thread_pool.spawn(workerUpdateCObject, .{
self, c_object, &c_obj_prog_node, &self.work_queue_wait_group,
@@ -1944,9 +1960,13 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
}
}
- // Iterate over all the files and look for outdated and deleted declarations.
- if (self.bin_file.options.module) |mod| {
- try mod.processOutdatedAndDeletedDecls();
+ const use_stage1 = build_options.omit_stage2 or
+ (build_options.is_stage1 and self.bin_file.options.use_llvm);
+ if (!use_stage1) {
+ // Iterate over all the files and look for outdated and deleted declarations.
+ if (self.bin_file.options.module) |mod| {
+ try mod.processOutdatedAndDeletedDecls();
+ }
}
while (self.work_queue.readItem()) |work_item| switch (work_item) {
@@ -2283,8 +2303,12 @@ fn workerAstGenFile(
) void {
defer wg.finish();
+ var child_prog_node = prog_node.start(file.sub_file_path, 0);
+ child_prog_node.activate();
+ defer child_prog_node.end();
+
const mod = comp.bin_file.options.module.?;
- mod.astGenFile(file, prog_node) catch |err| switch (err) {
+ mod.astGenFile(file) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
file.status = .retryable_failure;
@@ -2315,6 +2339,9 @@ fn workerAstGenFile(
break :blk mod.importFile(file, import_path) catch continue;
};
if (import_result.is_new) {
+ log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
+ file.sub_file_path, import_path, import_result.file.sub_file_path,
+ });
wg.start();
comp.thread_pool.spawn(workerAstGenFile, .{
comp, import_result.file, prog_node, wg,
@@ -2373,7 +2400,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
// We need to "unhit" in this case, to keep the digests matching.
const prev_hash_state = man.hash.peekBin();
const actual_hit = hit: {
- const is_hit = try man.hit();
+ _ = try man.hit();
if (man.files.items.len == 0) {
man.unhit(prev_hash_state, 0);
break :hit false;
@@ -2536,13 +2563,23 @@ fn reportRetryableAstGenError(
file.status = .retryable_failure;
- const err_msg = try Module.ErrorMsg.create(gpa, .{
+ const src_loc: Module.SrcLoc = .{
.file_scope = file,
.parent_decl_node = 0,
.lazy = .entire_file,
- }, "unable to load {s}: {s}", .{
- file.sub_file_path, @errorName(err),
- });
+ };
+
+ const err_msg = if (file.pkg.root_src_directory.path) |dir_path|
+ try Module.ErrorMsg.create(
+ gpa,
+ src_loc,
+ "unable to load {s}" ++ std.fs.path.sep_str ++ "{s}: {s}",
+ .{ dir_path, file.sub_file_path, @errorName(err) },
+ )
+ else
+ try Module.ErrorMsg.create(gpa, src_loc, "unable to load {s}: {s}", .{
+ file.sub_file_path, @errorName(err),
+ });
errdefer err_msg.destroy(gpa);
{
@@ -2828,7 +2865,7 @@ pub fn addCCArgs(
try argv.appendSlice(&[_][]const u8{ "-target", llvm_triple });
switch (ext) {
- .c, .cpp, .h => {
+ .c, .cpp, .m, .h => {
try argv.appendSlice(&[_][]const u8{
"-nostdinc",
"-fno-spell-checking",
@@ -3015,7 +3052,7 @@ pub fn addCCArgs(
const prefix: []const u8 = if (target.cpu.arch == .riscv64) "rv64" else "rv32";
const prefix_len = 4;
assert(prefix.len == prefix_len);
- var march_buf: [prefix_len + letters.len]u8 = undefined;
+ var march_buf: [prefix_len + letters.len + 1]u8 = undefined;
var march_index: usize = prefix_len;
mem.copy(u8, &march_buf, prefix);
@@ -3119,6 +3156,7 @@ pub const FileExt = enum {
c,
cpp,
h,
+ m,
ll,
bc,
assembly,
@@ -3130,7 +3168,7 @@ pub const FileExt = enum {
pub fn clangSupportsDepFile(ext: FileExt) bool {
return switch (ext) {
- .c, .cpp, .h => true,
+ .c, .cpp, .h, .m => true,
.ll,
.bc,
@@ -3164,6 +3202,10 @@ pub fn hasCppExt(filename: []const u8) bool {
mem.endsWith(u8, filename, ".cxx");
}
+pub fn hasObjCExt(filename: []const u8) bool {
+ return mem.endsWith(u8, filename, ".m");
+}
+
pub fn hasAsmExt(filename: []const u8) bool {
return mem.endsWith(u8, filename, ".s") or mem.endsWith(u8, filename, ".S");
}
@@ -3200,6 +3242,8 @@ pub fn classifyFileExt(filename: []const u8) FileExt {
return .c;
} else if (hasCppExt(filename)) {
return .cpp;
+ } else if (hasObjCExt(filename)) {
+ return .m;
} else if (mem.endsWith(u8, filename, ".ll")) {
return .ll;
} else if (mem.endsWith(u8, filename, ".bc")) {
@@ -3223,6 +3267,7 @@ pub fn classifyFileExt(filename: []const u8) FileExt {
test "classifyFileExt" {
try std.testing.expectEqual(FileExt.cpp, classifyFileExt("foo.cc"));
+ try std.testing.expectEqual(FileExt.m, classifyFileExt("foo.m"));
try std.testing.expectEqual(FileExt.unknown, classifyFileExt("foo.nim"));
try std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so"));
try std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1"));
@@ -3826,9 +3871,8 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
_ = try man.addFile(main_zig_file, null);
{
- var local_arena = std.heap.ArenaAllocator.init(comp.gpa);
- defer local_arena.deinit();
- try addPackageTableToCacheHash(&man.hash, &local_arena, mod.root_pkg.table, .{ .files = &man });
+ var seen_table = std.AutoHashMap(*Package, void).init(&arena_allocator.allocator);
+ try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.root_pkg.table, &seen_table, .{ .files = &man });
}
man.hash.add(comp.bin_file.options.valgrind);
man.hash.add(comp.bin_file.options.single_threaded);
@@ -4099,6 +4143,12 @@ fn createStage1Pkg(
var children = std.ArrayList(*stage1.Pkg).init(arena);
var it = pkg.table.iterator();
while (it.next()) |entry| {
+ if (mem.eql(u8, entry.key_ptr.*, "std") or
+ mem.eql(u8, entry.key_ptr.*, "builtin") or
+ mem.eql(u8, entry.key_ptr.*, "root"))
+ {
+ continue;
+ }
try children.append(try createStage1Pkg(arena, entry.key_ptr.*, entry.value_ptr.*, child_pkg));
}
break :blk children.items;
diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig
index b21c893850..0fd26532f0 100644
--- a/src/DepTokenizer.zig
+++ b/src/DepTokenizer.zig
@@ -944,7 +944,7 @@ fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void {
try out.writeAll(text);
var i: usize = text.len;
const end = 79;
- while (i < 79) : (i += 1) {
+ while (i < end) : (i += 1) {
try out.writeAll(&[_]u8{label[0]});
}
try out.writeAll("\n");
@@ -953,7 +953,7 @@ fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void {
fn printRuler(out: anytype) !void {
var i: usize = 0;
const end = 79;
- while (i < 79) : (i += 1) {
+ while (i < end) : (i += 1) {
try out.writeAll("-");
}
try out.writeAll("\n");
@@ -1057,4 +1057,3 @@ const printable_char_tab: [256]u8 = (
"................................................................" ++
"................................................................"
).*;
-
diff --git a/src/Module.zig b/src/Module.zig
index 8e868c4ed8..439256f320 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -774,7 +774,10 @@ pub const Fn = struct {
ir.dumpFn(mod, func);
}
- pub fn deinit(func: *Fn, gpa: *Allocator) void {}
+ pub fn deinit(func: *Fn, gpa: *Allocator) void {
+ _ = func;
+ _ = gpa;
+ }
};
pub const Var = struct {
@@ -1148,6 +1151,9 @@ pub const Scope = struct {
is_comptime: bool,
+ /// when null, it is determined by build mode, changed by @setRuntimeSafety
+ want_safety: ?bool = null,
+
/// This `Block` maps a block ZIR instruction to the corresponding
/// AIR instruction for break instruction analysis.
pub const Label = struct {
@@ -1192,12 +1198,12 @@ pub const Scope = struct {
.runtime_cond = parent.runtime_cond,
.runtime_loop = parent.runtime_loop,
.runtime_index = parent.runtime_index,
+ .want_safety = parent.want_safety,
};
}
pub fn wantSafety(block: *const Block) bool {
- // TODO take into account scope's safety overrides
- return switch (block.sema.mod.optimizeMode()) {
+ return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
.Debug => true,
.ReleaseSafe => true,
.ReleaseFast => false,
@@ -1561,7 +1567,6 @@ pub const SrcLoc = struct {
.node_offset_array_access_index => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node_datas = tree.nodes.items(.data);
- const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
const main_tokens = tree.nodes.items(.main_token);
const tok_index = main_tokens[node_datas[node].rhs];
@@ -1570,7 +1575,6 @@ pub const SrcLoc = struct {
},
.node_offset_slice_sentinel => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
- const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
const full = switch (node_tags[node]) {
@@ -1586,7 +1590,6 @@ pub const SrcLoc = struct {
},
.node_offset_call_func => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
- const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
var params: [1]ast.Node.Index = undefined;
@@ -1625,7 +1628,6 @@ pub const SrcLoc = struct {
.node_offset_deref_ptr => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node_datas = tree.nodes.items(.data);
- const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
const tok_index = node_datas[node].lhs;
const token_starts = tree.tokens.items(.start);
@@ -1633,7 +1635,6 @@ pub const SrcLoc = struct {
},
.node_offset_asm_source => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
- const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
const full = switch (node_tags[node]) {
@@ -1648,7 +1649,6 @@ pub const SrcLoc = struct {
},
.node_offset_asm_ret_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
- const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
const full = switch (node_tags[node]) {
@@ -1771,7 +1771,6 @@ pub const SrcLoc = struct {
.node_offset_fn_type_cc => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
- const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
var params: [1]ast.Node.Index = undefined;
@@ -1790,7 +1789,6 @@ pub const SrcLoc = struct {
.node_offset_fn_type_ret_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
- const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
var params: [1]ast.Node.Index = undefined;
@@ -1810,7 +1808,6 @@ pub const SrcLoc = struct {
.node_offset_anyframe_type => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node_datas = tree.nodes.items(.data);
- const node_tags = tree.nodes.items(.tag);
const parent_node = src_loc.declRelativeToNodeIndex(node_off);
const node = node_datas[parent_node].rhs;
const main_tokens = tree.nodes.items(.main_token);
@@ -2217,7 +2214,7 @@ comptime {
}
}
-pub fn astGenFile(mod: *Module, file: *Scope.File, prog_node: *std.Progress.Node) !void {
+pub fn astGenFile(mod: *Module, file: *Scope.File) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -2502,7 +2499,6 @@ pub fn astGenFile(mod: *Module, file: *Scope.File, prog_node: *std.Progress.Node
@ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
if (data_has_safety_tag) {
// The `Data` union has a safety tag but in the file format we store it without.
- const tags = file.zir.instructions.items(.tag);
for (file.zir.instructions.items(.data)) |*data, i| {
const as_struct = @ptrCast(*const Stage1DataLayout, data);
safety_buffer[i] = as_struct.data;
@@ -2838,7 +2834,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void {
}
pub fn semaPkg(mod: *Module, pkg: *Package) !void {
- const file = (try mod.importPkg(mod.root_pkg, pkg)).file;
+ const file = (try mod.importPkg(pkg)).file;
return mod.semaFile(file);
}
@@ -3137,7 +3133,7 @@ pub const ImportFileResult = struct {
is_new: bool,
};
-pub fn importPkg(mod: *Module, cur_pkg: *Package, pkg: *Package) !ImportFileResult {
+pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
const gpa = mod.gpa;
// The resolved path is used as the key in the import table, to detect if
@@ -3190,7 +3186,10 @@ pub fn importFile(
import_string: []const u8,
) !ImportFileResult {
if (cur_file.pkg.table.get(import_string)) |pkg| {
- return mod.importPkg(cur_file.pkg, pkg);
+ return mod.importPkg(pkg);
+ }
+ if (!mem.endsWith(u8, import_string, ".zig")) {
+ return error.PackageNotFound;
}
const gpa = mod.gpa;
@@ -3386,7 +3385,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!vo
log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace });
// Update the AST node of the decl; even if its contents are unchanged, it may
// have been re-ordered.
- const prev_src_node = decl.src_node;
decl.src_node = decl_node;
decl.src_line = line;
@@ -3395,7 +3393,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!vo
decl.has_align = has_align;
decl.has_linksection = has_linksection;
decl.zir_decl_index = @intCast(u32, decl_sub_index);
- if (decl.getFunction()) |func| {
+ if (decl.getFunction()) |_| {
switch (mod.comp.bin_file.tag) {
.coff => {
// TODO Implement for COFF
@@ -3764,6 +3762,7 @@ pub fn analyzeExport(
errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
}
pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst {
+ _ = mod;
const const_inst = try arena.create(ir.Inst.Constant);
const_inst.* = .{
.base = .{
@@ -4132,6 +4131,7 @@ pub fn floatAdd(
lhs: Value,
rhs: Value,
) !Value {
+ _ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4165,6 +4165,7 @@ pub fn floatSub(
lhs: Value,
rhs: Value,
) !Value {
+ _ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4198,6 +4199,7 @@ pub fn floatDiv(
lhs: Value,
rhs: Value,
) !Value {
+ _ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4231,6 +4233,7 @@ pub fn floatMul(
lhs: Value,
rhs: Value,
) !Value {
+ _ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4264,6 +4267,7 @@ pub fn simplePtrType(
mutable: bool,
size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
+ _ = mod;
if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
return Type.initTag(.const_slice_u8);
}
@@ -4298,6 +4302,7 @@ pub fn ptrType(
@"volatile": bool,
size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
+ _ = mod;
assert(host_size == 0 or bit_offset < host_size * 8);
// TODO check if type can be represented by simplePtrType
@@ -4315,6 +4320,7 @@ pub fn ptrType(
}
pub fn optionalType(mod: *Module, arena: *Allocator, child_type: Type) Allocator.Error!Type {
+ _ = mod;
switch (child_type.tag()) {
.single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
arena,
@@ -4335,6 +4341,7 @@ pub fn arrayType(
sentinel: ?Value,
elem_type: Type,
) Allocator.Error!Type {
+ _ = mod;
if (elem_type.eql(Type.initTag(.u8))) {
if (sentinel) |some| {
if (some.eql(Value.initTag(.zero))) {
@@ -4365,6 +4372,7 @@ pub fn errorUnionType(
error_set: Type,
payload: Type,
) Allocator.Error!Type {
+ _ = mod;
assert(error_set.zigTypeTag() == .ErrorSet);
if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
return Type.initTag(.anyerror_void_error_union);
@@ -4692,11 +4700,9 @@ pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void {
const src: LazySrcLoc = .{ .node_offset = union_obj.node_offset };
extra_index += @boolToInt(small.has_src_node);
- const tag_type_ref = if (small.has_tag_type) blk: {
- const tag_type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
+ if (small.has_tag_type) {
extra_index += 1;
- break :blk tag_type_ref;
- } else .none;
+ }
const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
@@ -4784,6 +4790,7 @@ pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void {
cur_bit_bag >>= 1;
const unused = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
+ _ = unused;
const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
extra_index += 1;
@@ -4800,11 +4807,9 @@ pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void {
break :blk align_ref;
} else .none;
- const tag_ref: Zir.Inst.Ref = if (has_tag) blk: {
- const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
+ if (has_tag) {
extra_index += 1;
- break :blk tag_ref;
- } else .none;
+ }
// This string needs to outlive the ZIR code.
const field_name = try decl_arena.allocator.dupe(u8, field_name_zir);
diff --git a/src/Sema.zig b/src/Sema.zig
index 68d47b7f3b..ea6f4898c5 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -473,7 +473,7 @@ pub fn analyzeBody(
continue;
},
.set_cold => {
- try sema.zirSetAlignStack(block, inst);
+ try sema.zirSetCold(block, inst);
i += 1;
continue;
},
@@ -702,6 +702,7 @@ fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I
}
fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirCoerceResultPtr", .{});
@@ -776,6 +777,7 @@ fn zirStructDecl(
}
fn createTypeName(sema: *Sema, block: *Scope.Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 {
+ _ = block;
switch (name_strategy) {
.anon => {
// It would be neat to have "struct:line:column" but this name has
@@ -1074,6 +1076,10 @@ fn zirOpaqueDecl(
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
+ _ = name_strategy;
+ _ = inst_data;
+ _ = src;
+ _ = extra;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirOpaqueDecl", .{});
}
@@ -1230,14 +1236,13 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In
fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
- const src = inst_data.src();
const arg_name = inst_data.get(sema.code);
const arg_index = sema.next_arg_index;
sema.next_arg_index += 1;
// TODO check if arg_name shadows a Decl
- if (block.inlining) |inlining| {
+ if (block.inlining) |_| {
return sema.param_inst_list[arg_index];
}
@@ -1636,6 +1641,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
}
fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const tracy = trace(@src());
defer tracy.end();
@@ -1644,6 +1650,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
}
fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const tracy = trace(@src());
defer tracy.end();
@@ -1661,6 +1668,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
}
fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].float;
const src = inst_data.src();
@@ -1673,6 +1681,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*
}
fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
@@ -2031,8 +2040,12 @@ fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src: LazySrcLoc = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetRuntimeSafety", .{});
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+
+ const op = try sema.resolveInst(inst_data.operand);
+ const op_coerced = try sema.coerce(block, Type.initTag(.bool), op, operand_src);
+ const b = (try sema.resolveConstValue(block, operand_src, op_coerced)).toBool();
+ block.want_safety = b;
}
fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
@@ -2354,6 +2367,7 @@ fn analyzeCall(
}
fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const tracy = trace(@src());
defer tracy.end();
@@ -2462,6 +2476,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
}
fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const tracy = trace(@src());
defer tracy.end();
@@ -2506,7 +2521,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
}
try sema.requireRuntimeBlock(block, src);
- return block.addUnOp(src, result_ty, .error_to_int, op_coerced);
+ return block.addUnOp(src, result_ty, .bitcast, op_coerced);
}
fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
@@ -2539,7 +2554,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
// const is_gt_max = @panic("TODO get max errors in compilation");
// try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code);
}
- return block.addUnOp(src, Type.initTag(.anyerror), .int_to_error, op);
+ return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op);
}
fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
@@ -2622,6 +2637,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
}
fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const tracy = trace(@src());
defer tracy.end();
@@ -3005,7 +3021,6 @@ fn zirFunc(
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
const param_types = sema.code.refSlice(extra.end, extra.data.param_types_len);
@@ -3061,7 +3076,9 @@ fn funcCommon(
const fn_ty: Type = fn_ty: {
// Hot path for some common function types.
- if (zir_param_types.len == 0 and !var_args and align_val.tag() == .null_value) {
+ if (zir_param_types.len == 0 and !var_args and align_val.tag() == .null_value and
+ !inferred_error_set)
+ {
if (return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) {
break :fn_ty Type.initTag(.fn_noreturn_no_args);
}
@@ -3092,6 +3109,10 @@ fn funcCommon(
return mod.fail(&block.base, src, "TODO implement support for function prototypes to have alignment specified", .{});
}
+ if (inferred_error_set) {
+ return mod.fail(&block.base, src, "TODO implement functions with inferred error sets", .{});
+ }
+
break :fn_ty try Type.Tag.function.create(sema.arena, .{
.param_types = param_types,
.return_type = return_type,
@@ -3332,9 +3353,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
@@ -3499,6 +3518,8 @@ fn zirSwitchCapture(
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
const src = switch_info.src();
+ _ = is_ref;
+ _ = is_multi;
return sema.mod.fail(&block.base, src, "TODO implement Sema for zirSwitchCapture", .{});
}
@@ -3516,6 +3537,7 @@ fn zirSwitchCaptureElse(
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
const src = switch_info.src();
+ _ = is_ref;
return sema.mod.fail(&block.base, src, "TODO implement Sema for zirSwitchCaptureElse", .{});
}
@@ -3653,7 +3675,6 @@ fn analyzeSwitch(
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
- const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
try sema.validateSwitchItemEnum(
@@ -3763,7 +3784,6 @@ fn analyzeSwitch(
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
- const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
try sema.validateSwitchItem(
@@ -3859,7 +3879,6 @@ fn analyzeSwitch(
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
- const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
try sema.validateSwitchItemBool(
@@ -3942,7 +3961,6 @@ fn analyzeSwitch(
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
- const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
try sema.validateSwitchItemSparse(
@@ -4457,6 +4475,7 @@ fn validateSwitchNoRange(
fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ _ = extra;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{});
@@ -4515,12 +4534,17 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
+
+ _ = block;
+ _ = inst;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
}
fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
+
+ _ = inst;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirShr", .{});
}
@@ -4590,18 +4614,24 @@ fn zirBitwise(
fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
+
+ _ = inst;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
}
fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
+
+ _ = inst;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
}
fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
+
+ _ = inst;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayMul", .{});
}
@@ -5061,6 +5091,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
}
fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].un_node;
const src = inst_data.src();
@@ -5069,6 +5100,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
}
fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+ _ = block;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_ptr = try sema.resolveInst(inst_data.operand);
@@ -5594,6 +5626,10 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
return mod.failWithOwnedErrorMsg(&block.base, msg);
}
+ if (is_ref) {
+ return mod.fail(&block.base, src, "TODO: Sema.zirStructInit is_ref=true", .{});
+ }
+
const is_comptime = for (field_inits) |field_init| {
if (field_init.value() == null) {
break false;
@@ -5617,18 +5653,24 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
+
+ _ = is_ref;
return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{});
}
fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
+
+ _ = is_ref;
return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
}
fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
+
+ _ = is_ref;
return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{});
}
@@ -5758,7 +5800,64 @@ fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToPtr", .{});
+
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand_res = try sema.resolveInst(extra.rhs);
+ const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src);
+
+ const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const type_res = try sema.resolveType(block, src, extra.lhs);
+ if (type_res.zigTypeTag() != .Pointer)
+ return sema.mod.fail(&block.base, type_src, "expected pointer, found '{}'", .{type_res});
+ const ptr_align = type_res.ptrAlignment(sema.mod.getTarget());
+
+ if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
+ const addr = val.toUnsignedInt();
+ if (!type_res.isAllowzeroPtr() and addr == 0)
+ return sema.mod.fail(&block.base, operand_src, "pointer type '{}' does not allow address zero", .{type_res});
+ if (addr != 0 and addr % ptr_align != 0)
+ return sema.mod.fail(&block.base, operand_src, "pointer type '{}' requires aligned address", .{type_res});
+
+ const val_payload = try sema.arena.create(Value.Payload.U64);
+ val_payload.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = addr,
+ };
+ return sema.mod.constInst(sema.arena, src, .{
+ .ty = type_res,
+ .val = Value.initPayload(&val_payload.base),
+ });
+ }
+
+ try sema.requireRuntimeBlock(block, src);
+ if (block.wantSafety()) {
+ const zero = try sema.mod.constInst(sema.arena, src, .{
+ .ty = Type.initTag(.u64),
+ .val = Value.initTag(.zero),
+ });
+ if (!type_res.isAllowzeroPtr()) {
+ const is_non_zero = try block.addBinOp(src, Type.initTag(.bool), .cmp_neq, operand_coerced, zero);
+ try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
+ }
+
+ if (ptr_align > 1) {
+ const val_payload = try sema.arena.create(Value.Payload.U64);
+ val_payload.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = ptr_align - 1,
+ };
+ const align_minus_1 = try sema.mod.constInst(sema.arena, src, .{
+ .ty = Type.initTag(.u64),
+ .val = Value.initPayload(&val_payload.base),
+ });
+ const remainder = try block.addBinOp(src, Type.initTag(.u64), .bit_and, operand_coerced, align_minus_1);
+ const is_aligned = try block.addBinOp(src, Type.initTag(.bool), .cmp_eq, remainder, zero);
+ try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment);
+ }
+ }
+ return block.addUnOp(src, type_res, .bitcast, operand_coerced);
}
fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
@@ -5967,6 +6066,8 @@ fn zirAwait(
) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
+
+ _ = is_nosuspend;
return sema.mod.fail(&block.base, src, "TODO: Sema.zirAwait", .{});
}
@@ -5977,7 +6078,6 @@ fn zirVarExtended(
) InnerError!*Inst {
const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
const src = sema.src;
- const align_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at align
const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type
const mut_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at mut token
const init_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at init expr
@@ -6183,6 +6283,8 @@ pub const PanicId = enum {
unreach,
unwrap_null,
unwrap_errunion,
+ cast_to_null,
+ incorrect_alignment,
invalid_error_code,
};
@@ -6245,6 +6347,8 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
}
fn safetyPanic(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, panic_id: PanicId) !Zir.Inst.Index {
+ _ = sema;
+ _ = panic_id;
// TODO Once we have a panic function to call, call it here instead of breakpoint.
_ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
_ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach);
@@ -6558,6 +6662,8 @@ fn elemPtrArray(
});
}
}
+ _ = elem_index;
+ _ = elem_index_src;
return sema.mod.fail(&block.base, src, "TODO implement more analyze elemptr for arrays", .{});
}
@@ -6805,10 +6911,13 @@ fn storePtr(
if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
return;
- if (try sema.resolvePossiblyUndefinedValue(block, src, ptr)) |ptr_val| {
+ if (try sema.resolvePossiblyUndefinedValue(block, src, ptr)) |ptr_val| blk: {
const const_val = (try sema.resolvePossiblyUndefinedValue(block, src, value)) orelse
return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
+ if (ptr_val.tag() == .int_u64)
+ break :blk; // propogate it down to runtime
+
const comptime_alloc = ptr_val.castTag(.comptime_alloc).?;
if (comptime_alloc.data.runtime_index < block.runtime_index) {
if (block.runtime_cond) |cond_src| {
@@ -6947,7 +7056,10 @@ fn analyzeLoad(
.Pointer => ptr.ty.elemType(),
else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}),
};
- if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
+ if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| blk: {
+ if (ptr_val.tag() == .int_u64)
+ break :blk; // do it at runtime
+
return sema.mod.constInst(sema.arena, src, .{
.ty = elem_ty,
.val = try ptr_val.pointerDeref(sema.arena),
@@ -7065,6 +7177,7 @@ fn analyzeSlice(
ptr_child.isVolatilePtr(),
return_ptr_size,
);
+ _ = return_type;
return sema.mod.fail(&block.base, src, "TODO implement analysis of slice", .{});
}
@@ -7410,14 +7523,14 @@ fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type
struct_obj.status = .have_field_types;
return ty;
},
- .extern_options => return sema.resolveBuiltinTypeFields(block, src, ty, "ExternOptions"),
- .export_options => return sema.resolveBuiltinTypeFields(block, src, ty, "ExportOptions"),
- .atomic_ordering => return sema.resolveBuiltinTypeFields(block, src, ty, "AtomicOrdering"),
- .atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, ty, "AtomicRmwOp"),
- .calling_convention => return sema.resolveBuiltinTypeFields(block, src, ty, "CallingConvention"),
- .float_mode => return sema.resolveBuiltinTypeFields(block, src, ty, "FloatMode"),
- .reduce_op => return sema.resolveBuiltinTypeFields(block, src, ty, "ReduceOp"),
- .call_options => return sema.resolveBuiltinTypeFields(block, src, ty, "CallOptions"),
+ .extern_options => return sema.resolveBuiltinTypeFields(block, src, "ExternOptions"),
+ .export_options => return sema.resolveBuiltinTypeFields(block, src, "ExportOptions"),
+ .atomic_ordering => return sema.resolveBuiltinTypeFields(block, src, "AtomicOrdering"),
+ .atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, "AtomicRmwOp"),
+ .calling_convention => return sema.resolveBuiltinTypeFields(block, src, "CallingConvention"),
+ .float_mode => return sema.resolveBuiltinTypeFields(block, src, "FloatMode"),
+ .reduce_op => return sema.resolveBuiltinTypeFields(block, src, "ReduceOp"),
+ .call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"),
.@"union", .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
@@ -7443,7 +7556,6 @@ fn resolveBuiltinTypeFields(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
- ty: Type,
name: []const u8,
) InnerError!Type {
const resolved_ty = try sema.getBuiltinType(block, src, name);
@@ -7458,7 +7570,7 @@ fn getBuiltinType(
) InnerError!Type {
const mod = sema.mod;
const std_pkg = mod.root_pkg.table.get("std").?;
- const std_file = (mod.importPkg(mod.root_pkg, std_pkg) catch unreachable).file;
+ const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
const opt_builtin_inst = try sema.analyzeNamespaceLookup(
block,
src,
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index e66742b49e..4a7fa8cb9b 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -101,7 +101,7 @@ pub fn deinit(self: *ThreadPool) void {
pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
if (std.builtin.single_threaded) {
- const result = @call(.{}, func, args);
+ @call(.{}, func, args);
return;
}
@@ -114,7 +114,7 @@ pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
fn runFn(runnable: *Runnable) void {
const run_node = @fieldParentPtr(RunQueue.Node, "data", runnable);
const closure = @fieldParentPtr(@This(), "run_node", run_node);
- const result = @call(.{}, func, closure.arguments);
+ @call(.{}, func, closure.arguments);
const held = closure.pool.lock.acquire();
defer held.release();
diff --git a/src/Zir.zig b/src/Zir.zig
index 0c8855bbc9..5d75030711 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -3176,6 +3176,7 @@ const Writer = struct {
inst: Inst.Index,
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const inst_data = self.code.instructions.items(.data)[inst].array_type_sentinel;
+ _ = inst_data;
try stream.writeAll("TODO)");
}
@@ -3213,6 +3214,7 @@ const Writer = struct {
inst: Inst.Index,
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const inst_data = self.code.instructions.items(.data)[inst].ptr_type;
+ _ = inst_data;
try stream.writeAll("TODO)");
}
@@ -3559,6 +3561,8 @@ const Writer = struct {
assert(body.len == 0);
try stream.writeAll("{}, {})");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.indent += 2;
if (body.len == 0) {
try stream.writeAll("{}, {\n");
@@ -3621,6 +3625,7 @@ const Writer = struct {
try stream.writeAll(",\n");
}
+ self.parent_decl_node = prev_parent_decl_node;
self.indent -= 2;
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
@@ -3689,6 +3694,8 @@ const Writer = struct {
const body = self.code.extra[extra_index..][0..body_len];
extra_index += body.len;
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.indent += 2;
if (body.len == 0) {
try stream.writeAll("{}, {\n");
@@ -3754,6 +3761,7 @@ const Writer = struct {
try stream.writeAll(",\n");
}
+ self.parent_decl_node = prev_parent_decl_node;
self.indent -= 2;
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
@@ -3909,6 +3917,8 @@ const Writer = struct {
assert(body.len == 0);
try stream.writeAll("{}, {})");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.indent += 2;
if (body.len == 0) {
try stream.writeAll("{}, {\n");
@@ -3949,6 +3959,7 @@ const Writer = struct {
}
try stream.writeAll(",\n");
}
+ self.parent_decl_node = prev_parent_decl_node;
self.indent -= 2;
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
@@ -4431,6 +4442,7 @@ const Writer = struct {
}
fn writeInstIndex(self: *Writer, stream: anytype, inst: Inst.Index) !void {
+ _ = self;
return stream.print("%{d}", .{inst});
}
@@ -4451,6 +4463,7 @@ const Writer = struct {
name: []const u8,
flag: bool,
) !void {
+ _ = self;
if (!flag) return;
try stream.writeAll(name);
}
@@ -4739,7 +4752,6 @@ fn findDeclsSwitch(
var extra_index: usize = special.end;
var scalar_i: usize = 0;
while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
const body_len = zir.extra[extra_index];
extra_index += 1;
@@ -4779,7 +4791,6 @@ fn findDeclsSwitchMulti(
{
var scalar_i: usize = 0;
while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
const body_len = zir.extra[extra_index];
extra_index += 1;
@@ -4800,12 +4811,11 @@ fn findDeclsSwitchMulti(
extra_index += 1;
const items = zir.refSlice(extra_index, items_len);
extra_index += items_len;
+ _ = items;
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
- const item_first = @intToEnum(Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
- const item_last = @intToEnum(Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
}
diff --git a/src/air.zig b/src/air.zig
index 8f3ae6d631..37126f8153 100644
--- a/src/air.zig
+++ b/src/air.zig
@@ -92,10 +92,6 @@ pub const Inst = struct {
is_err,
/// *E!T => bool
is_err_ptr,
- /// E => u16
- error_to_int,
- /// u16 => E
- int_to_error,
bool_and,
bool_or,
/// Read a value from a pointer.
@@ -159,8 +155,6 @@ pub const Inst = struct {
.is_null_ptr,
.is_err,
.is_err_ptr,
- .int_to_error,
- .error_to_int,
.ptrtoint,
.floatcast,
.intcast,
@@ -310,9 +304,12 @@ pub const Inst = struct {
base: Inst,
pub fn operandCount(self: *const NoOp) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const NoOp, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -322,6 +319,7 @@ pub const Inst = struct {
operand: *Inst,
pub fn operandCount(self: *const UnOp) usize {
+ _ = self;
return 1;
}
pub fn getOperand(self: *const UnOp, index: usize) ?*Inst {
@@ -337,6 +335,7 @@ pub const Inst = struct {
rhs: *Inst,
pub fn operandCount(self: *const BinOp) usize {
+ _ = self;
return 2;
}
pub fn getOperand(self: *const BinOp, index: usize) ?*Inst {
@@ -362,9 +361,12 @@ pub const Inst = struct {
name: [*:0]const u8,
pub fn operandCount(self: *const Arg) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const Arg, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -397,9 +399,12 @@ pub const Inst = struct {
body: Body,
pub fn operandCount(self: *const Block) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const Block, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -418,9 +423,12 @@ pub const Inst = struct {
body: Body,
pub fn operandCount(self: *const BrBlockFlat) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -433,9 +441,11 @@ pub const Inst = struct {
operand: *Inst,
pub fn operandCount(self: *const Br) usize {
+ _ = self;
return 1;
}
pub fn getOperand(self: *const Br, index: usize) ?*Inst {
+ _ = self;
if (index == 0)
return self.operand;
return null;
@@ -449,9 +459,12 @@ pub const Inst = struct {
block: *Block,
pub fn operandCount(self: *const BrVoid) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const BrVoid, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -496,6 +509,7 @@ pub const Inst = struct {
else_death_count: u32 = 0,
pub fn operandCount(self: *const CondBr) usize {
+ _ = self;
return 1;
}
pub fn getOperand(self: *const CondBr, index: usize) ?*Inst {
@@ -522,9 +536,12 @@ pub const Inst = struct {
val: Value,
pub fn operandCount(self: *const Constant) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const Constant, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -536,9 +553,12 @@ pub const Inst = struct {
body: Body,
pub fn operandCount(self: *const Loop) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const Loop, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -550,9 +570,12 @@ pub const Inst = struct {
variable: *Module.Var,
pub fn operandCount(self: *const VarPtr) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const VarPtr, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -565,9 +588,12 @@ pub const Inst = struct {
field_index: usize,
pub fn operandCount(self: *const StructFieldPtr) usize {
+ _ = self;
return 1;
}
pub fn getOperand(self: *const StructFieldPtr, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
var i = index;
if (i < 1)
@@ -599,6 +625,7 @@ pub const Inst = struct {
};
pub fn operandCount(self: *const SwitchBr) usize {
+ _ = self;
return 1;
}
pub fn getOperand(self: *const SwitchBr, index: usize) ?*Inst {
@@ -627,9 +654,12 @@ pub const Inst = struct {
column: u32,
pub fn operandCount(self: *const DbgStmt) usize {
+ _ = self;
return 0;
}
pub fn getOperand(self: *const DbgStmt, index: usize) ?*Inst {
+ _ = self;
+ _ = index;
return null;
}
};
@@ -730,8 +760,6 @@ const DumpTzir = struct {
.is_null_ptr,
.is_err,
.is_err_ptr,
- .error_to_int,
- .int_to_error,
.ptrtoint,
.floatcast,
.intcast,
@@ -865,8 +893,6 @@ const DumpTzir = struct {
.is_null_ptr,
.is_err,
.is_err_ptr,
- .error_to_int,
- .int_to_error,
.ptrtoint,
.floatcast,
.intcast,
diff --git a/src/clang.zig b/src/clang.zig
index bd305bc76f..023d1aba4b 100644
--- a/src/clang.zig
+++ b/src/clang.zig
@@ -389,9 +389,6 @@ pub const ElaboratedType = opaque {
};
pub const EnumConstantDecl = opaque {
- pub const getInitExpr = ZigClangEnumConstantDecl_getInitExpr;
- extern fn ZigClangEnumConstantDecl_getInitExpr(*const EnumConstantDecl) ?*const Expr;
-
pub const getInitVal = ZigClangEnumConstantDecl_getInitVal;
extern fn ZigClangEnumConstantDecl_getInitVal(*const EnumConstantDecl) *const APSInt;
};
diff --git a/src/codegen.zig b/src/codegen.zig
index b35b8d5986..0b63222242 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -118,7 +118,6 @@ pub fn generateSymbol(
if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
- const prev_len = code.items.len;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
@@ -565,7 +564,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.r11 = true, // fp
.r14 = true, // lr
};
- inline for (callee_preserved_regs) |reg, i| {
+ inline for (callee_preserved_regs) |reg| {
if (self.register_manager.isRegAllocated(reg)) {
@field(saved_regs, @tagName(reg)) = true;
}
@@ -603,7 +602,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
} else {
if (math.cast(i26, amt)) |offset| {
writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(.al, offset).toU32());
- } else |err| {
+ } else |_| {
return self.failSymbol("exitlude jump is too large", .{});
}
}
@@ -676,7 +675,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
} else {
if (math.cast(i28, amt)) |offset| {
writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(offset).toU32());
- } else |err| {
+ } else |_| {
return self.failSymbol("exitlude jump is too large", .{});
}
}
@@ -850,8 +849,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?),
.is_err => return self.genIsErr(inst.castTag(.is_err).?),
.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?),
- .error_to_int => return self.genErrorToInt(inst.castTag(.error_to_int).?),
- .int_to_error => return self.genIntToError(inst.castTag(.int_to_error).?),
.load => return self.genLoad(inst.castTag(.load).?),
.loop => return self.genLoop(inst.castTag(.loop).?),
.not => return self.genNot(inst.castTag(.not).?),
@@ -1500,6 +1497,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
swap_lhs_and_rhs: bool,
op: ir.Inst.Tag,
) !void {
+ _ = src;
assert(lhs_mcv == .register or rhs_mcv == .register);
const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
@@ -1908,6 +1906,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.genX8664ModRMRegToStack(src, dst_ty, off, src_reg, mr + 0x1);
},
.immediate => |imm| {
+ _ = imm;
return self.fail(src, "TODO implement x86 ADD/SUB/CMP source immediate", .{});
},
.embedded_in_code, .memory, .stack_offset => {
@@ -2057,6 +2056,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.genSetStack(src, dst_ty, off, MCValue{ .register = dst_reg });
},
.immediate => |imm| {
+ _ = imm;
return self.fail(src, "TODO implement x86 multiply source immediate", .{});
},
.embedded_in_code, .memory, .stack_offset => {
@@ -2960,14 +2960,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(inst.base.src, "TODO load the operand and call genIsErr", .{});
}
- fn genErrorToInt(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
- return self.resolveInst(inst.operand);
- }
-
- fn genIntToError(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
- return self.resolveInst(inst.operand);
- }
-
fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue {
// A loop is a setup to be able to jump back to the beginning.
const start_index = self.code.items.len;
@@ -2993,14 +2985,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.arm, .armeb => {
if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32());
- } else |err| {
+ } else |_| {
return self.fail(src, "TODO: enable larger branch offset", .{});
}
},
.aarch64, .aarch64_be, .aarch64_32 => {
if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32());
- } else |err| {
+ } else |_| {
return self.fail(src, "TODO: enable larger branch offset", .{});
}
},
@@ -3318,9 +3310,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.compare_flags_unsigned => |op| {
+ _ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
},
.compare_flags_signed => |op| {
+ _ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
},
.immediate => {
@@ -3328,6 +3322,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg });
},
.embedded_in_code => |code_offset| {
+ _ = code_offset;
return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
},
.register => |reg| {
@@ -3364,6 +3359,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.memory => |vaddr| {
+ _ = vaddr;
return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
},
.stack_offset => |off| {
@@ -3392,9 +3388,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.compare_flags_unsigned => |op| {
+ _ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
},
.compare_flags_signed => |op| {
+ _ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
},
.immediate => |x_big| {
@@ -3447,12 +3445,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.embedded_in_code => |code_offset| {
+ _ = code_offset;
return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
},
.register => |reg| {
try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89);
},
.memory => |vaddr| {
+ _ = vaddr;
return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
},
.stack_offset => |off| {
@@ -3481,9 +3481,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.compare_flags_unsigned => |op| {
+ _ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
},
.compare_flags_signed => |op| {
+ _ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
},
.immediate => {
@@ -3491,6 +3493,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg });
},
.embedded_in_code => |code_offset| {
+ _ = code_offset;
return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
},
.register => |reg| {
@@ -3523,6 +3526,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.memory => |vaddr| {
+ _ = vaddr;
return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
},
.stack_offset => |off| {
@@ -3853,6 +3857,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
);
},
.compare_flags_signed => |op| {
+ _ = op;
return self.fail(src, "TODO set register with compare flags value (signed)", .{});
},
.immediate => |x| {
@@ -4179,6 +4184,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{});
}
}
+ if (typed_value.val.tag() == .int_u64) {
+ return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
+ }
return self.fail(src, "TODO codegen more kinds of const pointers", .{});
},
.Int => {
@@ -4468,6 +4476,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
dummy,
pub fn allocIndex(self: Register) ?u4 {
+ _ = self;
return null;
}
};
diff --git a/src/codegen/arm.zig b/src/codegen/arm.zig
index cc6abe2e52..891a9e100b 100644
--- a/src/codegen/arm.zig
+++ b/src/codegen/arm.zig
@@ -674,7 +674,7 @@ pub const Instruction = union(enum) {
};
const imm4h: u4 = switch (offset) {
.immediate => |imm| @truncate(u4, imm >> 4),
- .register => |reg| 0b0000,
+ .register => 0b0000,
};
return Instruction{
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 68b74d7659..ae439693b8 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -47,6 +47,8 @@ fn formatTypeAsCIdentifier(
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
+ _ = fmt;
+ _ = options;
var buffer = [1]u8{0} ** 128;
// We don't care if it gets cut off, it's still more unique than a number
var buf = std.fmt.bufPrint(&buffer, "{}", .{data}) catch &buffer;
@@ -63,6 +65,8 @@ fn formatIdent(
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
+ _ = fmt;
+ _ = options;
for (ident) |c, i| {
switch (c) {
'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
@@ -724,8 +728,6 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
.is_err => try genIsErr(o, inst.castTag(.is_err).?),
.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?),
- .error_to_int => try genErrorToInt(o, inst.castTag(.error_to_int).?),
- .int_to_error => try genIntToError(o, inst.castTag(.int_to_error).?),
.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?),
.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?),
@@ -749,6 +751,7 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
}
fn genVarPtr(o: *Object, inst: *Inst.VarPtr) !CValue {
+ _ = o;
return CValue{ .decl_ref = inst.variable.owner_decl };
}
@@ -939,6 +942,8 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
}
fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue {
+ _ = o;
+ _ = inst;
// TODO emit #line directive here with line number and filename
return CValue.none;
}
@@ -1018,11 +1023,13 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
}
fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue {
+ _ = inst;
try o.writer().writeAll("zig_breakpoint();\n");
return CValue.none;
}
fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue {
+ _ = inst;
try o.writer().writeAll("zig_unreachable();\n");
return CValue.none;
}
@@ -1109,7 +1116,6 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
for (as.inputs) |i, index| {
if (i[0] == '{' and i[i.len - 1] == '}') {
const reg = i[1 .. i.len - 1];
- const arg = as.args[index];
if (index > 0) {
try writer.writeAll(", ");
}
@@ -1283,14 +1289,6 @@ fn genIsErr(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
-fn genIntToError(o: *Object, inst: *Inst.UnOp) !CValue {
- return o.resolveInst(inst.operand);
-}
-
-fn genErrorToInt(o: *Object, inst: *Inst.UnOp) !CValue {
- return o.resolveInst(inst.operand);
-}
-
fn IndentWriter(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 4e4621ca29..2baedf8c9d 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -154,6 +154,7 @@ pub const Object = struct {
object_pathZ: [:0]const u8,
pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
+ _ = sub_path;
const self = try allocator.create(Object);
errdefer allocator.destroy(self);
@@ -742,6 +743,7 @@ pub const FuncGen = struct {
}
fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
+ _ = inst;
_ = self.builder.buildRetVoid();
return null;
}
@@ -873,6 +875,7 @@ pub const FuncGen = struct {
}
fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
+ _ = inst;
_ = self.builder.buildUnreachable();
return null;
}
@@ -1013,6 +1016,7 @@ pub const FuncGen = struct {
}
fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
+ _ = inst;
const llvn_fn = self.getIntrinsic("llvm.debugtrap");
_ = self.builder.buildCall(llvn_fn, null, 0, "");
return null;
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 3bff09bd8d..9e0cd19f6f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -714,7 +714,6 @@ pub const DeclGen = struct {
return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{});
}
- const is_bool = info.class == .bool;
const is_float = info.class == .float;
const is_signed = info.signedness == .signed;
// **Note**: All these operations must be valid for vectors as well!
@@ -802,8 +801,6 @@ pub const DeclGen = struct {
const result_id = self.spv.allocResultId();
const result_type_id = try self.genType(inst.base.src, inst.base.ty);
- const info = try self.arithmeticTypeInfo(inst.operand.ty);
-
const opcode = switch (inst.base.tag) {
// Bool -> bool
.not => Opcode.OpLogicalNot,
@@ -867,6 +864,7 @@ pub const DeclGen = struct {
// are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
// an error for pointers.
const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+ _ = result_type_id;
try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index d7fe239d3b..ec4ec66b1e 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -702,7 +702,7 @@ pub const Context = struct {
try writer.writeByte(wasm.valtype(.i32)); // error code is always an i32 integer.
try writer.writeByte(val_type);
},
- else => |ret_type| {
+ else => {
try leb.writeULEB128(writer, @as(u32, 1));
// Can we maybe get the source index of the return type?
const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type);
@@ -721,7 +721,7 @@ pub const Context = struct {
// TODO: check for and handle death of instructions
const mod_fn = blk: {
if (typed_value.val.castTag(.function)) |func| break :blk func.data;
- if (typed_value.val.castTag(.extern_fn)) |ext_fn| return Result.appended; // don't need code body for extern functions
+ if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions
unreachable;
};
@@ -849,7 +849,6 @@ pub const Context = struct {
}
fn genCall(self: *Context, inst: *Inst.Call) InnerError!WValue {
- const func_inst = inst.func.castTag(.constant).?;
const func_val = inst.func.value().?;
const target: *Decl = blk: {
@@ -914,7 +913,7 @@ pub const Context = struct {
.local => |local| {
try self.emitWValue(rhs);
try writer.writeByte(wasm.opcode(.local_set));
- try leb.writeULEB128(writer, lhs.local);
+ try leb.writeULEB128(writer, local);
},
else => unreachable,
}
@@ -926,6 +925,7 @@ pub const Context = struct {
}
fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue {
+ _ = inst;
// arguments share the index with locals
defer self.local_index += 1;
return WValue{ .local = self.local_index };
@@ -1146,8 +1146,6 @@ pub const Context = struct {
}
fn genCmp(self: *Context, inst: *Inst.BinOp, op: std.math.CompareOperator) InnerError!WValue {
- const ty = inst.lhs.ty.tag();
-
// save offset, so potential conditions can insert blocks in front of
// the comparison that we can later jump back to
const offset = self.code.items.len;
@@ -1216,12 +1214,15 @@ pub const Context = struct {
}
fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue {
+ _ = self;
+ _ = breakpoint;
// unsupported by wasm itself. Can be implemented once we support DWARF
// for wasm
return .none;
}
fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue {
+ _ = unreach;
try self.code.append(wasm.opcode(.@"unreachable"));
return .none;
}
diff --git a/src/codegen/x86_64.zig b/src/codegen/x86_64.zig
index 0e1ffefe75..2964d7245e 100644
--- a/src/codegen/x86_64.zig
+++ b/src/codegen/x86_64.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const assert = std.debug.assert;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
-const Type = @import("../Type.zig");
const DW = std.dwarf;
// zig fmt: off
diff --git a/src/glibc.zig b/src/glibc.zig
index 30bc021ced..c0afc321c9 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -497,7 +497,6 @@ fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList(
const target = comp.getTarget();
const arch = target.cpu.arch;
const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
- const glibc = try lib_path(comp, arena, lib_libc ++ "glibc");
const s = path.sep_str;
diff --git a/src/link.zig b/src/link.zig
index d1508c29cd..c26feb10fd 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -38,6 +38,8 @@ pub const Options = struct {
/// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
module: ?*Module,
dynamic_linker: ?[]const u8,
+ /// The root path for the dynamic linker and system libraries (as well as frameworks on Darwin)
+ sysroot: ?[]const u8,
/// Used for calculating how much space to reserve for symbols in case the binary file
/// does not already have a symbol table.
symbol_count_hint: u64 = 32,
@@ -104,8 +106,6 @@ pub const Options = struct {
llvm_cpu_features: ?[*:0]const u8,
/// Extra args passed directly to LLD. Ignored when not linking with LLD.
extra_lld_args: []const []const u8,
- /// Darwin-only. Set the root path to the system libraries and frameworks.
- syslibroot: ?[]const u8,
objects: []const []const u8,
framework_dirs: []const []const u8,
@@ -517,7 +517,7 @@ pub const File = struct {
.target = base.options.target,
.output_mode = .Obj,
});
- const o_directory = base.options.module.?.zig_cache_artifact_directory;
+ const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 6cb219db41..1793b95210 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -76,7 +76,10 @@ pub fn deinit(self: *C) void {
self.decl_table.deinit(self.base.allocator);
}
-pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
+pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {
+ _ = self;
+ _ = decl;
+}
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
_ = self.decl_table.swapRemove(decl);
@@ -307,4 +310,9 @@ pub fn updateDeclExports(
module: *Module,
decl: *Module.Decl,
exports: []const *Module.Export,
-) !void {}
+) !void {
+ _ = exports;
+ _ = decl;
+ _ = module;
+ _ = self;
+}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 9ab1c6d78a..b466cf9136 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -831,7 +831,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
- const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+ const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
@@ -1340,6 +1340,9 @@ pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+ _ = self;
+ _ = module;
+ _ = decl;
// TODO Implement this
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 84068ffeca..722077b8b8 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1108,7 +1108,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
for (buf) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf32_Phdr, phdr);
+ mem.bswapAllFields(elf.Elf32_Phdr, phdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
@@ -1120,7 +1120,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
for (buf) |*phdr, i| {
phdr.* = self.program_headers.items[i];
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf64_Phdr, phdr);
+ mem.bswapAllFields(elf.Elf64_Phdr, phdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
@@ -1197,7 +1197,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
shdr.* = sectHeaderTo32(self.sections.items[i]);
log.debug("writing section {}", .{shdr.*});
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf32_Shdr, shdr);
+ mem.bswapAllFields(elf.Elf32_Shdr, shdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
@@ -1210,7 +1210,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
shdr.* = self.sections.items[i];
log.debug("writing section {}", .{shdr.*});
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf64_Shdr, shdr);
+ mem.bswapAllFields(elf.Elf64_Shdr, shdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
@@ -1262,7 +1262,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
- const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+ const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
@@ -1354,6 +1354,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
man.hash.add(allow_shlib_undefined);
man.hash.add(self.base.options.bind_global_refs_locally);
man.hash.add(self.base.options.tsan);
+ man.hash.addOptionalBytes(self.base.options.sysroot);
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
_ = try man.hit();
@@ -1423,6 +1424,10 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
try argv.append("-error-limit=0");
+ if (self.base.options.sysroot) |sysroot| {
+ try argv.append(try std.fmt.allocPrint(arena, "--sysroot={s}", .{sysroot}));
+ }
+
if (self.base.options.lto) {
switch (self.base.options.optimize_mode) {
.Debug => {},
@@ -1938,6 +1943,9 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock) void {
}
fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64) void {
+ _ = self;
+ _ = text_block;
+ _ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
@@ -2706,6 +2714,7 @@ pub fn updateDeclExports(
/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Decl) !void {
+ _ = module;
const tracy = trace(@src());
defer tracy.end();
@@ -2736,14 +2745,14 @@ fn writeProgHeader(self: *Elf, index: usize) !void {
.p32 => {
var phdr = [1]elf.Elf32_Phdr{progHeaderTo32(self.program_headers.items[index])};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf32_Phdr, &phdr[0]);
+ mem.bswapAllFields(elf.Elf32_Phdr, &phdr[0]);
}
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
},
.p64 => {
var phdr = [1]elf.Elf64_Phdr{self.program_headers.items[index]};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf64_Phdr, &phdr[0]);
+ mem.bswapAllFields(elf.Elf64_Phdr, &phdr[0]);
}
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
},
@@ -2757,7 +2766,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
var shdr: [1]elf.Elf32_Shdr = undefined;
shdr[0] = sectHeaderTo32(self.sections.items[index]);
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf32_Shdr, &shdr[0]);
+ mem.bswapAllFields(elf.Elf32_Shdr, &shdr[0]);
}
const offset = self.shdr_table_offset.? + index * @sizeOf(elf.Elf32_Shdr);
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
@@ -2765,7 +2774,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
.p64 => {
var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf64_Shdr, &shdr[0]);
+ mem.bswapAllFields(elf.Elf64_Shdr, &shdr[0]);
}
const offset = self.shdr_table_offset.? + index * @sizeOf(elf.Elf64_Shdr);
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
@@ -2863,7 +2872,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
},
};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf32_Sym, &sym[0]);
+ mem.bswapAllFields(elf.Elf32_Sym, &sym[0]);
}
const off = syms_sect.sh_offset + @sizeOf(elf.Elf32_Sym) * index;
try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
@@ -2871,7 +2880,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => {
var sym = [1]elf.Elf64_Sym{self.local_symbols.items[index]};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf64_Sym, &sym[0]);
+ mem.bswapAllFields(elf.Elf64_Sym, &sym[0]);
}
const off = syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index;
try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
@@ -2902,7 +2911,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
.st_shndx = self.global_symbols.items[i].st_shndx,
};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf32_Sym, sym);
+ mem.bswapAllFields(elf.Elf32_Sym, sym);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
@@ -2921,7 +2930,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
.st_shndx = self.global_symbols.items[i].st_shndx,
};
if (foreign_endian) {
- std.elf.bswapAllFields(elf.Elf64_Sym, sym);
+ mem.bswapAllFields(elf.Elf64_Sym, sym);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
@@ -2979,6 +2988,7 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
}
fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
+ _ = self;
return 120;
}
@@ -3372,7 +3382,7 @@ const CsuObjects = struct {
if (result.crtend) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* });
},
else => {
- inline for (std.meta.fields(@TypeOf(result))) |f, i| {
+ inline for (std.meta.fields(@TypeOf(result))) |f| {
if (@field(result, f.name)) |*obj| {
obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* });
}
@@ -3380,7 +3390,7 @@ const CsuObjects = struct {
},
}
} else {
- inline for (std.meta.fields(@TypeOf(result))) |f, i| {
+ inline for (std.meta.fields(@TypeOf(result))) |f| {
if (@field(result, f.name)) |*obj| {
if (comp.crt_files.get(obj.*)) |crtf| {
obj.* = crtf.full_object_path;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 8c1d092f12..95389c2d95 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -441,6 +441,7 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
}
pub fn flushModule(self: *MachO, comp: *Compilation) !void {
+ _ = comp;
const tracy = trace(@src());
defer tracy.end();
@@ -513,6 +514,119 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
}
}
+fn resolvePaths(
+ arena: *Allocator,
+ resolved_paths: *std.ArrayList([]const u8),
+ syslibroot: ?[]const u8,
+ search_dirs: []const []const u8,
+ lib_names: []const []const u8,
+ kind: enum { lib, framework },
+) !void {
+ var resolved_dirs = std.ArrayList([]const u8).init(arena);
+ for (search_dirs) |dir| {
+ if (fs.path.isAbsolute(dir)) {
+ var candidates = std.ArrayList([]const u8).init(arena);
+ if (syslibroot) |root| {
+ const full_path = try fs.path.join(arena, &[_][]const u8{ root, dir });
+ try candidates.append(full_path);
+ }
+ try candidates.append(dir);
+
+ var found = false;
+ for (candidates.items) |candidate| {
+ // Verify that search path actually exists
+ var tmp = fs.cwd().openDir(candidate, .{}) catch |err| switch (err) {
+ error.FileNotFound => continue,
+ else => |e| return e,
+ };
+ defer tmp.close();
+
+ try resolved_dirs.append(candidate);
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ switch (kind) {
+ .lib => log.warn("directory not found for '-L{s}'", .{dir}),
+ .framework => log.warn("directory not found for '-F{s}'", .{dir}),
+ }
+ }
+ } else {
+ // Verify that search path actually exists
+ var tmp = fs.cwd().openDir(dir, .{}) catch |err| switch (err) {
+ error.FileNotFound => {
+ switch (kind) {
+ .lib => log.warn("directory not found for '-L{s}'", .{dir}),
+ .framework => log.warn("directory not found for '-F{s}'", .{dir}),
+ }
+ continue;
+ },
+ else => |e| return e,
+ };
+ defer tmp.close();
+
+ try resolved_dirs.append(dir);
+ }
+ }
+
+ // Assume ld64 default: -search_paths_first
+ // Look in each directory for a dylib (next, tbd), and then for archive
+ // TODO implement alternative: -search_dylibs_first
+ const exts = switch (kind) {
+ .lib => &[_][]const u8{ "dylib", "tbd", "a" },
+ .framework => &[_][]const u8{ "dylib", "tbd" },
+ };
+
+ for (lib_names) |lib_name| {
+ var found = false;
+
+ ext: for (exts) |ext| {
+ const lib_name_ext = blk: {
+ switch (kind) {
+ .lib => break :blk try std.fmt.allocPrint(arena, "lib{s}.{s}", .{ lib_name, ext }),
+ .framework => {
+ const prefix = try std.fmt.allocPrint(arena, "{s}.framework", .{lib_name});
+ const nn = try std.fmt.allocPrint(arena, "{s}.{s}", .{ lib_name, ext });
+ break :blk try fs.path.join(arena, &[_][]const u8{ prefix, nn });
+ },
+ }
+ };
+
+ for (resolved_dirs.items) |dir| {
+ const full_path = try fs.path.join(arena, &[_][]const u8{ dir, lib_name_ext });
+
+ // Check if the lib file exists.
+ const tmp = fs.cwd().openFile(full_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => continue,
+ else => |e| return e,
+ };
+ defer tmp.close();
+
+ try resolved_paths.append(full_path);
+ found = true;
+ break :ext;
+ }
+ }
+
+ if (!found) {
+ switch (kind) {
+ .lib => {
+ log.warn("library not found for '-l{s}'", .{lib_name});
+ log.warn("Library search paths:", .{});
+ },
+ .framework => {
+                    log.warn("framework not found for '-framework {s}'", .{lib_name});
+ log.warn("Framework search paths:", .{});
+ },
+ }
+ for (resolved_dirs.items) |dir| {
+ log.warn(" {s}", .{dir});
+ }
+ }
+ }
+}
+
fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -533,7 +647,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
- const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+ const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
@@ -590,7 +704,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
man.hash.add(allow_shlib_undefined);
man.hash.add(self.base.options.bind_global_refs_locally);
man.hash.add(self.base.options.system_linker_hack);
- man.hash.addOptionalBytes(self.base.options.syslibroot);
+ man.hash.addOptionalBytes(self.base.options.sysroot);
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
_ = try man.hit();
@@ -675,6 +789,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
zld.deinit();
}
zld.arch = target.cpu.arch;
+ zld.syslibroot = self.base.options.sysroot;
zld.stack_size = stack_size;
// Positional arguments to the linker such as object files and static archives.
@@ -699,7 +814,6 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}
// Shared and static libraries passed via `-l` flag.
- var libs = std.ArrayList([]const u8).init(arena);
var search_lib_names = std.ArrayList([]const u8).init(arena);
const system_libs = self.base.options.system_libs.keys();
@@ -715,85 +829,15 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
try search_lib_names.append(link_lib);
}
- var search_lib_dirs = std.ArrayList([]const u8).init(arena);
-
- for (self.base.options.lib_dirs) |path| {
- if (fs.path.isAbsolute(path)) {
- var candidates = std.ArrayList([]const u8).init(arena);
- if (self.base.options.syslibroot) |syslibroot| {
- const full_path = try fs.path.join(arena, &[_][]const u8{ syslibroot, path });
- try candidates.append(full_path);
- }
- try candidates.append(path);
-
- var found = false;
- for (candidates.items) |candidate| {
- // Verify that search path actually exists
- var tmp = fs.cwd().openDir(candidate, .{}) catch |err| switch (err) {
- error.FileNotFound => continue,
- else => |e| return e,
- };
- defer tmp.close();
-
- try search_lib_dirs.append(candidate);
- found = true;
- break;
- }
-
- if (!found) {
- log.warn("directory not found for '-L{s}'", .{path});
- }
- } else {
- // Verify that search path actually exists
- var tmp = fs.cwd().openDir(path, .{}) catch |err| switch (err) {
- error.FileNotFound => {
- log.warn("directory not found for '-L{s}'", .{path});
- continue;
- },
- else => |e| return e,
- };
- defer tmp.close();
-
- try search_lib_dirs.append(path);
- }
- }
-
- // Assume ld64 default: -search_paths_first
- // Look in each directory for a dylib (tbd), and then for archive
- // TODO implement alternative: -search_dylibs_first
- // TODO text-based API, or .tbd files.
- const exts = &[_][]const u8{ "dylib", "a" };
-
- for (search_lib_names.items) |l_name| {
- var found = false;
-
- ext: for (exts) |ext| {
- const l_name_ext = try std.fmt.allocPrint(arena, "lib{s}.{s}", .{ l_name, ext });
-
- for (search_lib_dirs.items) |lib_dir| {
- const full_path = try fs.path.join(arena, &[_][]const u8{ lib_dir, l_name_ext });
-
- // Check if the lib file exists.
- const tmp = fs.cwd().openFile(full_path, .{}) catch |err| switch (err) {
- error.FileNotFound => continue,
- else => |e| return e,
- };
- defer tmp.close();
-
- try libs.append(full_path);
- found = true;
- break :ext;
- }
- }
-
- if (!found) {
- log.warn("library not found for '-l{s}'", .{l_name});
- log.warn("Library search paths:", .{});
- for (search_lib_dirs.items) |lib_dir| {
- log.warn(" {s}", .{lib_dir});
- }
- }
- }
+ var libs = std.ArrayList([]const u8).init(arena);
+ try resolvePaths(
+ arena,
+ &libs,
+ self.base.options.sysroot,
+ self.base.options.lib_dirs,
+ search_lib_names.items,
+ .lib,
+ );
// rpaths
var rpath_table = std.StringArrayHashMap(void).init(arena);
@@ -809,9 +853,14 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}
// frameworks
- for (self.base.options.frameworks) |framework| {
- log.warn("frameworks not yet supported for '-framework {s}'", .{framework});
- }
+ try resolvePaths(
+ arena,
+ &libs,
+ self.base.options.sysroot,
+ self.base.options.framework_dirs,
+ self.base.options.frameworks,
+ .framework,
+ );
if (self.base.options.verbose_link) {
var argv = std.ArrayList([]const u8).init(arena);
@@ -819,7 +868,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
try argv.append("zig");
try argv.append("ld");
- if (self.base.options.syslibroot) |syslibroot| {
+ if (self.base.options.sysroot) |syslibroot| {
try argv.append("-syslibroot");
try argv.append(syslibroot);
}
@@ -848,6 +897,9 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
try zld.link(positionals.items, full_out_path, .{
.libs = libs.items,
.rpaths = rpaths.items,
+ .libc_stub_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ "libc", "darwin", "libSystem.B.tbd",
+ }),
});
break :outer;
@@ -965,7 +1017,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}
}
- if (self.base.options.syslibroot) |dir| {
+ if (self.base.options.sysroot) |dir| {
try argv.append("-syslibroot");
try argv.append(dir);
}
@@ -1254,6 +1306,9 @@ fn freeTextBlock(self: *MachO, text_block: *TextBlock) void {
}
fn shrinkTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64) void {
+ _ = self;
+ _ = text_block;
+ _ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
@@ -1725,18 +1780,8 @@ pub fn populateMissingMetadata(self: *MachO) !void {
if (self.pagezero_segment_cmd_index == null) {
self.pagezero_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.base.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__PAGEZERO"),
- .vmaddr = 0,
+ .Segment = SegmentCommand.empty("__PAGEZERO", .{
.vmsize = 0x100000000, // size always set to 4GB
- .fileoff = 0,
- .filesize = 0,
- .maxprot = 0,
- .initprot = 0,
- .nsects = 0,
- .flags = 0,
}),
});
self.header_dirty = true;
@@ -1755,18 +1800,12 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __TEXT segment free space 0x{x} to 0x{x}", .{ 0, needed_size });
try self.load_commands.append(self.base.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__TEXT"),
+ .Segment = SegmentCommand.empty("__TEXT", .{
.vmaddr = 0x100000000, // always starts at 4GB
.vmsize = needed_size,
- .fileoff = 0,
.filesize = needed_size,
.maxprot = maxprot,
.initprot = initprot,
- .nsects = 0,
- .flags = 0,
}),
});
self.header_dirty = true;
@@ -1787,19 +1826,12 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __text section free space 0x{x} to 0x{x}", .{ off, off + needed_size });
- try text_segment.addSection(self.base.allocator, .{
- .sectname = makeStaticString("__text"),
- .segname = makeStaticString("__TEXT"),
+ try text_segment.addSection(self.base.allocator, "__text", .{
.addr = text_segment.inner.vmaddr + off,
.size = @intCast(u32, needed_size),
.offset = @intCast(u32, off),
.@"align" = alignment,
- .reloff = 0,
- .nreloc = 0,
.flags = flags,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -1825,19 +1857,13 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __stubs section free space 0x{x} to 0x{x}", .{ off, off + needed_size });
- try text_segment.addSection(self.base.allocator, .{
- .sectname = makeStaticString("__stubs"),
- .segname = makeStaticString("__TEXT"),
+ try text_segment.addSection(self.base.allocator, "__stubs", .{
.addr = text_segment.inner.vmaddr + off,
.size = needed_size,
.offset = @intCast(u32, off),
.@"align" = alignment,
- .reloff = 0,
- .nreloc = 0,
.flags = flags,
- .reserved1 = 0,
.reserved2 = stub_size,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -1858,19 +1884,12 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __stub_helper section free space 0x{x} to 0x{x}", .{ off, off + needed_size });
- try text_segment.addSection(self.base.allocator, .{
- .sectname = makeStaticString("__stub_helper"),
- .segname = makeStaticString("__TEXT"),
+ try text_segment.addSection(self.base.allocator, "__stub_helper", .{
.addr = text_segment.inner.vmaddr + off,
.size = needed_size,
.offset = @intCast(u32, off),
.@"align" = alignment,
- .reloff = 0,
- .nreloc = 0,
.flags = flags,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -1887,18 +1906,13 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __DATA_CONST segment free space 0x{x} to 0x{x}", .{ address_and_offset.offset, address_and_offset.offset + needed_size });
try self.load_commands.append(self.base.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__DATA_CONST"),
+ .Segment = SegmentCommand.empty("__DATA_CONST", .{
.vmaddr = address_and_offset.address,
.vmsize = needed_size,
.fileoff = address_and_offset.offset,
.filesize = needed_size,
.maxprot = maxprot,
.initprot = initprot,
- .nsects = 0,
- .flags = 0,
}),
});
self.header_dirty = true;
@@ -1915,19 +1929,12 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __got section free space 0x{x} to 0x{x}", .{ off, off + needed_size });
- try dc_segment.addSection(self.base.allocator, .{
- .sectname = makeStaticString("__got"),
- .segname = makeStaticString("__DATA_CONST"),
+ try dc_segment.addSection(self.base.allocator, "__got", .{
.addr = dc_segment.inner.vmaddr + off - dc_segment.inner.fileoff,
.size = needed_size,
.offset = @intCast(u32, off),
.@"align" = 3, // 2^3 = @sizeOf(u64)
- .reloff = 0,
- .nreloc = 0,
.flags = flags,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -1944,18 +1951,13 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __DATA segment free space 0x{x} to 0x{x}", .{ address_and_offset.offset, address_and_offset.offset + needed_size });
try self.load_commands.append(self.base.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__DATA"),
+ .Segment = SegmentCommand.empty("__DATA", .{
.vmaddr = address_and_offset.address,
.vmsize = needed_size,
.fileoff = address_and_offset.offset,
.filesize = needed_size,
.maxprot = maxprot,
.initprot = initprot,
- .nsects = 0,
- .flags = 0,
}),
});
self.header_dirty = true;
@@ -1972,19 +1974,12 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __la_symbol_ptr section free space 0x{x} to 0x{x}", .{ off, off + needed_size });
- try data_segment.addSection(self.base.allocator, .{
- .sectname = makeStaticString("__la_symbol_ptr"),
- .segname = makeStaticString("__DATA"),
+ try data_segment.addSection(self.base.allocator, "__la_symbol_ptr", .{
.addr = data_segment.inner.vmaddr + off - data_segment.inner.fileoff,
.size = needed_size,
.offset = @intCast(u32, off),
.@"align" = 3, // 2^3 = @sizeOf(u64)
- .reloff = 0,
- .nreloc = 0,
.flags = flags,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -1993,26 +1988,17 @@ pub fn populateMissingMetadata(self: *MachO) !void {
const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
self.data_section_index = @intCast(u16, data_segment.sections.items.len);
- const flags = macho.S_REGULAR;
const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint;
const off = data_segment.findFreeSpace(needed_size, @alignOf(u64), null);
assert(off + needed_size <= data_segment.inner.fileoff + data_segment.inner.filesize); // TODO Must expand __DATA segment.
log.debug("found __data section free space 0x{x} to 0x{x}", .{ off, off + needed_size });
- try data_segment.addSection(self.base.allocator, .{
- .sectname = makeStaticString("__data"),
- .segname = makeStaticString("__DATA"),
+ try data_segment.addSection(self.base.allocator, "__data", .{
.addr = data_segment.inner.vmaddr + off - data_segment.inner.fileoff,
.size = needed_size,
.offset = @intCast(u32, off),
.@"align" = 3, // 2^3 = @sizeOf(u64)
- .reloff = 0,
- .nreloc = 0,
- .flags = flags,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -2027,18 +2013,11 @@ pub fn populateMissingMetadata(self: *MachO) !void {
log.debug("found __LINKEDIT segment free space at 0x{x}", .{address_and_offset.offset});
try self.load_commands.append(self.base.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__LINKEDIT"),
+ .Segment = SegmentCommand.empty("__LINKEDIT", .{
.vmaddr = address_and_offset.address,
- .vmsize = 0,
.fileoff = address_and_offset.offset,
- .filesize = 0,
.maxprot = maxprot,
.initprot = initprot,
- .nsects = 0,
- .flags = 0,
}),
});
self.header_dirty = true;
@@ -2396,13 +2375,6 @@ fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64,
return vaddr;
}
-pub fn makeStaticString(comptime bytes: []const u8) [16]u8 {
- var buf = [_]u8{0} ** 16;
- if (bytes.len > buf.len) @compileError("string too long; max 16 bytes");
- mem.copy(u8, &buf, bytes);
- return buf;
-}
-
fn makeString(self: *MachO, bytes: []const u8) !u32 {
if (self.string_table_directory.get(bytes)) |offset| {
log.debug("reusing '{s}' from string table at offset 0x{x}", .{ bytes, offset });
@@ -2918,7 +2890,6 @@ fn relocateSymbolTable(self: *MachO) !void {
const nsyms = nlocals + nglobals + nundefs;
if (symtab.nsyms < nsyms) {
- const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
const needed_size = nsyms * @sizeOf(macho.nlist_64);
if (needed_size > self.allocatedSizeLinkedit(symtab.symoff)) {
// Move the entire symbol table to a new location
@@ -3150,7 +3121,6 @@ fn writeExportTrie(self: *MachO) !void {
const nwritten = try trie.write(stream.writer());
assert(nwritten == trie.size);
- const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
const allocated_size = self.allocatedSizeLinkedit(dyld_info.export_off);
const needed_size = mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64));
@@ -3357,7 +3327,6 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
error.EndOfStream => break,
else => return err,
};
- const imm: u8 = inst & macho.BIND_IMMEDIATE_MASK;
const opcode: u8 = inst & macho.BIND_OPCODE_MASK;
switch (opcode) {
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index f47228077c..22439d300a 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -8,12 +8,13 @@ const macho = std.macho;
const mem = std.mem;
const Allocator = mem.Allocator;
+const Arch = std.Target.Cpu.Arch;
const Object = @import("Object.zig");
usingnamespace @import("commands.zig");
allocator: *Allocator,
-arch: ?std.Target.Cpu.Arch = null,
+arch: ?Arch = null,
file: ?fs.File = null,
header: ?ar_hdr = null,
name: ?[]const u8 = null,
@@ -85,10 +86,36 @@ const ar_hdr = extern struct {
}
};
-pub fn init(allocator: *Allocator) Archive {
- return .{
+pub fn createAndParseFromPath(allocator: *Allocator, arch: Arch, path: []const u8) !?*Archive {
+ const file = fs.cwd().openFile(path, .{}) catch |err| switch (err) {
+ error.FileNotFound => return null,
+ else => |e| return e,
+ };
+ errdefer file.close();
+
+ const archive = try allocator.create(Archive);
+ errdefer allocator.destroy(archive);
+
+ const name = try allocator.dupe(u8, path);
+ errdefer allocator.free(name);
+
+ archive.* = .{
.allocator = allocator,
+ .arch = arch,
+ .name = name,
+ .file = file,
+ };
+
+ archive.parse() catch |err| switch (err) {
+ error.EndOfStream, error.NotArchive => {
+ archive.deinit();
+ allocator.destroy(archive);
+ return null;
+ },
+ else => |e| return e,
};
+
+ return archive;
}
pub fn deinit(self: *Archive) void {
@@ -116,15 +143,15 @@ pub fn parse(self: *Archive) !void {
const magic = try reader.readBytesNoEof(SARMAG);
if (!mem.eql(u8, &magic, ARMAG)) {
- log.err("invalid magic: expected '{s}', found '{s}'", .{ ARMAG, magic });
- return error.MalformedArchive;
+ log.debug("invalid magic: expected '{s}', found '{s}'", .{ ARMAG, magic });
+ return error.NotArchive;
}
self.header = try reader.readStruct(ar_hdr);
if (!mem.eql(u8, &self.header.?.ar_fmag, ARFMAG)) {
- log.err("invalid header delimiter: expected '{s}', found '{s}'", .{ ARFMAG, self.header.?.ar_fmag });
- return error.MalformedArchive;
+ log.debug("invalid header delimiter: expected '{s}', found '{s}'", .{ ARFMAG, self.header.?.ar_fmag });
+ return error.NotArchive;
}
var embedded_name = try parseName(self.allocator, self.header.?, reader);
@@ -222,20 +249,15 @@ pub fn parseObject(self: Archive, offset: u32) !*Object {
var object = try self.allocator.create(Object);
errdefer self.allocator.destroy(object);
- object.* = Object.init(self.allocator);
- object.arch = self.arch.?;
- object.file = try fs.cwd().openFile(self.name.?, .{});
- object.name = name;
- object.file_offset = @intCast(u32, try reader.context.getPos());
+ object.* = .{
+ .allocator = self.allocator,
+ .arch = self.arch.?,
+ .file = try fs.cwd().openFile(self.name.?, .{}),
+ .name = name,
+ .file_offset = @intCast(u32, try reader.context.getPos()),
+ };
try object.parse();
-
try reader.context.seekTo(0);
return object;
}
-
-pub fn isArchive(file: fs.File) !bool {
- const magic = try file.reader().readBytesNoEof(Archive.SARMAG);
- try file.seekTo(0);
- return mem.eql(u8, &magic, Archive.ARMAG);
-}
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index ca6e5157b3..684861ebf5 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -19,7 +19,6 @@ const MachO = @import("../MachO.zig");
const SrcFn = MachO.SrcFn;
const TextBlock = MachO.TextBlock;
const padToIdeal = MachO.padToIdeal;
-const makeStaticString = MachO.makeStaticString;
usingnamespace @import("commands.zig");
@@ -212,18 +211,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void
log.debug("found dSym __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size });
try self.load_commands.append(allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__DWARF"),
+ .Segment = SegmentCommand.empty("__DWARF", .{
.vmaddr = vmaddr,
.vmsize = needed_size,
.fileoff = off,
.filesize = needed_size,
- .maxprot = 0,
- .initprot = 0,
- .nsects = 0,
- .flags = 0,
}),
});
self.header_dirty = true;
@@ -234,19 +226,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void
self.debug_str_section_index = @intCast(u16, dwarf_segment.sections.items.len);
assert(self.debug_string_table.items.len == 0);
- try dwarf_segment.addSection(allocator, .{
- .sectname = makeStaticString("__debug_str"),
- .segname = makeStaticString("__DWARF"),
+ try dwarf_segment.addSection(allocator, "__debug_str", .{
.addr = dwarf_segment.inner.vmaddr,
.size = @intCast(u32, self.debug_string_table.items.len),
.offset = @intCast(u32, dwarf_segment.inner.fileoff),
.@"align" = 1,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -262,19 +246,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void
log.debug("found dSym __debug_info free space 0x{x} to 0x{x}", .{ off, off + file_size_hint });
- try dwarf_segment.addSection(allocator, .{
- .sectname = makeStaticString("__debug_info"),
- .segname = makeStaticString("__DWARF"),
+ try dwarf_segment.addSection(allocator, "__debug_info", .{
.addr = dwarf_segment.inner.vmaddr + off - dwarf_segment.inner.fileoff,
.size = file_size_hint,
.offset = @intCast(u32, off),
.@"align" = p_align,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -290,19 +266,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void
log.debug("found dSym __debug_abbrev free space 0x{x} to 0x{x}", .{ off, off + file_size_hint });
- try dwarf_segment.addSection(allocator, .{
- .sectname = makeStaticString("__debug_abbrev"),
- .segname = makeStaticString("__DWARF"),
+ try dwarf_segment.addSection(allocator, "__debug_abbrev", .{
.addr = dwarf_segment.inner.vmaddr + off - dwarf_segment.inner.fileoff,
.size = file_size_hint,
.offset = @intCast(u32, off),
.@"align" = p_align,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -318,19 +286,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void
log.debug("found dSym __debug_aranges free space 0x{x} to 0x{x}", .{ off, off + file_size_hint });
- try dwarf_segment.addSection(allocator, .{
- .sectname = makeStaticString("__debug_aranges"),
- .segname = makeStaticString("__DWARF"),
+ try dwarf_segment.addSection(allocator, "__debug_aranges", .{
.addr = dwarf_segment.inner.vmaddr + off - dwarf_segment.inner.fileoff,
.size = file_size_hint,
.offset = @intCast(u32, off),
.@"align" = p_align,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -346,19 +306,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void
log.debug("found dSym __debug_line free space 0x{x} to 0x{x}", .{ off, off + file_size_hint });
- try dwarf_segment.addSection(allocator, .{
- .sectname = makeStaticString("__debug_line"),
- .segname = makeStaticString("__DWARF"),
+ try dwarf_segment.addSection(allocator, "__debug_line", .{
.addr = dwarf_segment.inner.vmaddr + off - dwarf_segment.inner.fileoff,
.size = file_size_hint,
.offset = @intCast(u32, off),
.@"align" = p_align,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
self.header_dirty = true;
self.load_commands_dirty = true;
@@ -500,7 +452,6 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
if (self.debug_aranges_section_dirty) {
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].Segment;
const debug_aranges_sect = &dwarf_segment.sections.items[self.debug_aranges_section_index.?];
- const debug_info_sect = dwarf_segment.sections.items[self.debug_info_section_index.?];
var di_buf = std.ArrayList(u8).init(allocator);
defer di_buf.deinit();
@@ -693,14 +644,10 @@ pub fn deinit(self: *DebugSymbols, allocator: *Allocator) void {
}
fn copySegmentCommand(self: *DebugSymbols, allocator: *Allocator, base_cmd: SegmentCommand) !SegmentCommand {
- var cmd = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
+ var cmd = SegmentCommand.empty("", .{
.cmdsize = base_cmd.inner.cmdsize,
- .segname = undefined,
.vmaddr = base_cmd.inner.vmaddr,
.vmsize = base_cmd.inner.vmsize,
- .fileoff = 0,
- .filesize = 0,
.maxprot = base_cmd.inner.maxprot,
.initprot = base_cmd.inner.initprot,
.nsects = base_cmd.inner.nsects,
@@ -844,7 +791,6 @@ fn relocateSymbolTable(self: *DebugSymbols) !void {
const nsyms = nlocals + nglobals;
if (symtab.nsyms < nsyms) {
- const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
const needed_size = nsyms * @sizeOf(macho.nlist_64);
if (needed_size > self.allocatedSizeLinkedit(symtab.symoff)) {
// Move the entire symbol table to a new location
@@ -901,14 +847,10 @@ fn writeStringTable(self: *DebugSymbols) !void {
}
pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const Module.Decl) !void {
+ _ = module;
const tracy = trace(@src());
defer tracy.end();
- const tree = decl.namespace.file_scope.tree;
- const node_tags = tree.nodes.items(.tag);
- const node_datas = tree.nodes.items(.data);
- const token_starts = tree.tokens.items(.start);
-
const func = decl.val.castTag(.function).?.data;
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -933,6 +875,8 @@ pub fn initDeclDebugBuffers(
module: *Module,
decl: *Module.Decl,
) !DeclDebugBuffers {
+ _ = self;
+ _ = module;
const tracy = trace(@src());
defer tracy.end();
@@ -1195,6 +1139,7 @@ fn addDbgInfoType(
dbg_info_buffer: *std.ArrayList(u8),
target: std.Target,
) !void {
+ _ = self;
switch (ty.zigTypeTag()) {
.Void => unreachable,
.NoReturn => unreachable,
@@ -1371,6 +1316,7 @@ fn getRelocDbgInfoSubprogramHighPC() u32 {
}
fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
+ _ = self;
const directory_entry_format_count = 1;
const file_name_entry_format_count = 1;
const directory_count = 1;
@@ -1385,6 +1331,7 @@ fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
}
fn dbgInfoNeededHeaderBytes(self: DebugSymbols) u32 {
+ _ = self;
return 120;
}
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index a2a703dc08..2ecd2a20ed 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -1,24 +1,37 @@
const Dylib = @This();
const std = @import("std");
+const builtin = std.builtin;
+const assert = std.debug.assert;
const fs = std.fs;
+const fmt = std.fmt;
const log = std.log.scoped(.dylib);
const macho = std.macho;
+const math = std.math;
const mem = std.mem;
+const native_endian = builtin.target.cpu.arch.endian();
const Allocator = mem.Allocator;
+const Arch = std.Target.Cpu.Arch;
const Symbol = @import("Symbol.zig");
+const LibStub = @import("../tapi.zig").LibStub;
usingnamespace @import("commands.zig");
allocator: *Allocator,
-arch: ?std.Target.Cpu.Arch = null,
+
+arch: ?Arch = null,
header: ?macho.mach_header_64 = null,
file: ?fs.File = null,
name: ?[]const u8 = null,
+syslibroot: ?[]const u8 = null,
ordinal: ?u16 = null,
+// The actual dylib contents we care about linking with will be embedded at
+// an offset within a file if we are linking against a fat lib
+library_offset: u64 = 0,
+
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
symtab_cmd_index: ?u16 = null,
@@ -27,7 +40,13 @@ id_cmd_index: ?u16 = null,
id: ?Id = null,
-symbols: std.StringArrayHashMapUnmanaged(*Symbol) = .{},
+/// Parsed symbol table represented as hash map of symbols'
+/// names. We can and should defer creating *Symbols until
+/// a symbol is referenced by an object file.
+symbols: std.StringArrayHashMapUnmanaged(void) = .{},
+
+// TODO add parsing re-exported libs from binary dylibs
+dependent_libs: std.StringArrayHashMapUnmanaged(void) = .{},
pub const Id = struct {
name: []const u8,
@@ -35,13 +54,130 @@ pub const Id = struct {
current_version: u32,
compatibility_version: u32,
+ pub fn default(name: []const u8) Id {
+ return .{
+ .name = name,
+ .timestamp = 2,
+ .current_version = 0x10000,
+ .compatibility_version = 0x10000,
+ };
+ }
+
pub fn deinit(id: *Id, allocator: *Allocator) void {
allocator.free(id.name);
}
+
+ const ParseError = fmt.ParseIntError || fmt.BufPrintError;
+
+ pub fn parseCurrentVersion(id: *Id, version: anytype) ParseError!void {
+ id.current_version = try parseVersion(version);
+ }
+
+ pub fn parseCompatibilityVersion(id: *Id, version: anytype) ParseError!void {
+ id.compatibility_version = try parseVersion(version);
+ }
+
+ fn parseVersion(version: anytype) ParseError!u32 {
+ const string = blk: {
+ switch (version) {
+ .int => |int| {
+ var out: u32 = 0;
+ const major = try math.cast(u16, int);
+ out += @intCast(u32, major) << 16;
+ return out;
+ },
+ .float => |float| {
+ var buf: [256]u8 = undefined;
+ break :blk try fmt.bufPrint(&buf, "{d:.2}", .{float});
+ },
+ .string => |string| {
+ break :blk string;
+ },
+ }
+ };
+
+ var out: u32 = 0;
+ var values: [3][]const u8 = undefined;
+
+ var split = mem.split(string, ".");
+ var count: u4 = 0;
+ while (split.next()) |value| {
+ if (count > 2) {
+ log.warn("malformed version field: {s}", .{string});
+ return 0x10000;
+ }
+ values[count] = value;
+ count += 1;
+ }
+
+ if (count > 2) {
+ out += try fmt.parseInt(u8, values[2], 10);
+ }
+ if (count > 1) {
+ out += @intCast(u32, try fmt.parseInt(u8, values[1], 10)) << 8;
+ }
+ out += @intCast(u32, try fmt.parseInt(u16, values[0], 10)) << 16;
+
+ return out;
+ }
};
-pub fn init(allocator: *Allocator) Dylib {
- return .{ .allocator = allocator };
+pub const Error = error{
+ OutOfMemory,
+ EmptyStubFile,
+ MismatchedCpuArchitecture,
+ UnsupportedCpuArchitecture,
+} || fs.File.OpenError || std.os.PReadError || Id.ParseError;
+
+pub fn createAndParseFromPath(
+ allocator: *Allocator,
+ arch: Arch,
+ path: []const u8,
+ syslibroot: ?[]const u8,
+) Error!?[]*Dylib {
+ const file = fs.cwd().openFile(path, .{}) catch |err| switch (err) {
+ error.FileNotFound => return null,
+ else => |e| return e,
+ };
+ errdefer file.close();
+
+ const dylib = try allocator.create(Dylib);
+ errdefer allocator.destroy(dylib);
+
+ const name = try allocator.dupe(u8, path);
+ errdefer allocator.free(name);
+
+ dylib.* = .{
+ .allocator = allocator,
+ .arch = arch,
+ .name = name,
+ .file = file,
+ .syslibroot = syslibroot,
+ };
+
+ dylib.parse() catch |err| switch (err) {
+ error.EndOfStream, error.NotDylib => {
+ try file.seekTo(0);
+
+ var lib_stub = LibStub.loadFromFile(allocator, file) catch {
+ dylib.deinit();
+ allocator.destroy(dylib);
+ return null;
+ };
+ defer lib_stub.deinit();
+
+ try dylib.parseFromStub(lib_stub);
+ },
+ else => |e| return e,
+ };
+
+ var dylibs = std.ArrayList(*Dylib).init(allocator);
+ defer dylibs.deinit();
+
+ try dylibs.append(dylib);
+ try dylib.parseDependentLibs(&dylibs);
+
+ return dylibs.toOwnedSlice();
}
pub fn deinit(self: *Dylib) void {
@@ -50,12 +186,16 @@ pub fn deinit(self: *Dylib) void {
}
self.load_commands.deinit(self.allocator);
- for (self.symbols.values()) |value| {
- value.deinit(self.allocator);
- self.allocator.destroy(value);
+ for (self.symbols.keys()) |key| {
+ self.allocator.free(key);
}
self.symbols.deinit(self.allocator);
+ for (self.dependent_libs.keys()) |key| {
+ self.allocator.free(key);
+ }
+ self.dependent_libs.deinit(self.allocator);
+
if (self.name) |name| {
self.allocator.free(name);
}
@@ -71,25 +211,61 @@ pub fn closeFile(self: Dylib) void {
}
}
+fn decodeArch(cputype: macho.cpu_type_t) !std.Target.Cpu.Arch {
+ const arch: Arch = switch (cputype) {
+ macho.CPU_TYPE_ARM64 => .aarch64,
+ macho.CPU_TYPE_X86_64 => .x86_64,
+ else => {
+ return error.UnsupportedCpuArchitecture;
+ },
+ };
+ return arch;
+}
+
pub fn parse(self: *Dylib) !void {
log.debug("parsing shared library '{s}'", .{self.name.?});
+ self.library_offset = offset: {
+ const fat_header = try readFatStruct(self.file.?.reader(), macho.fat_header);
+ if (fat_header.magic != macho.FAT_MAGIC) break :offset 0;
+
+ var fat_arch_index: u32 = 0;
+ while (fat_arch_index < fat_header.nfat_arch) : (fat_arch_index += 1) {
+ const fat_arch = try readFatStruct(self.file.?.reader(), macho.fat_arch);
+ // If we come across an architecture that we do not know how to handle, that's
+ // fine because we can keep looking for one that might match.
+ const lib_arch = decodeArch(fat_arch.cputype) catch |err| switch (err) {
+ error.UnsupportedCpuArchitecture => continue,
+ else => |e| return e,
+ };
+ if (lib_arch == self.arch.?) {
+ // We have found a matching architecture!
+ break :offset fat_arch.offset;
+ }
+ } else {
+ log.err("Could not find matching cpu architecture in fat library: expected {s}", .{self.arch.?});
+ return error.MismatchedCpuArchitecture;
+ }
+ };
+
+ try self.file.?.seekTo(self.library_offset);
+
var reader = self.file.?.reader();
self.header = try reader.readStruct(macho.mach_header_64);
if (self.header.?.filetype != macho.MH_DYLIB) {
- log.err("invalid filetype: expected 0x{x}, found 0x{x}", .{ macho.MH_DYLIB, self.header.?.filetype });
- return error.MalformedDylib;
+ log.debug("invalid filetype: expected 0x{x}, found 0x{x}", .{ macho.MH_DYLIB, self.header.?.filetype });
+ return error.NotDylib;
}
- const this_arch: std.Target.Cpu.Arch = switch (self.header.?.cputype) {
- macho.CPU_TYPE_ARM64 => .aarch64,
- macho.CPU_TYPE_X86_64 => .x86_64,
- else => |value| {
- log.err("unsupported cpu architecture 0x{x}", .{value});
- return error.UnsupportedCpuArchitecture;
+ const this_arch: Arch = decodeArch(self.header.?.cputype) catch |err| switch (err) {
+ error.UnsupportedCpuArchitecture => |e| {
+ log.err("unsupported cpu architecture 0x{x}", .{self.header.?.cputype});
+ return e;
},
+ else => |e| return e,
};
+
if (this_arch != self.arch.?) {
log.err("mismatched cpu architecture: expected {s}, found {s}", .{ self.arch.?, this_arch });
return error.MismatchedCpuArchitecture;
@@ -100,7 +276,17 @@ pub fn parse(self: *Dylib) !void {
try self.parseSymbols();
}
-pub fn readLoadCommands(self: *Dylib, reader: anytype) !void {
+fn readFatStruct(reader: anytype, comptime T: type) !T {
+ // Fat structures (fat_header & fat_arch) are always written and read to/from
+ // disk in big endian order.
+ var res: T = try reader.readStruct(T);
+ if (native_endian != builtin.Endian.Big) {
+ mem.bswapAllFields(T, &res);
+ }
+ return res;
+}
+
+fn readLoadCommands(self: *Dylib, reader: anytype) !void {
try self.load_commands.ensureCapacity(self.allocator, self.header.?.ncmds);
var i: u16 = 0;
@@ -124,15 +310,10 @@ pub fn readLoadCommands(self: *Dylib, reader: anytype) !void {
}
}
-pub fn parseId(self: *Dylib) !void {
+fn parseId(self: *Dylib) !void {
const index = self.id_cmd_index orelse {
log.debug("no LC_ID_DYLIB load command found; using hard-coded defaults...", .{});
- self.id = .{
- .name = try self.allocator.dupe(u8, self.name.?),
- .timestamp = 2,
- .current_version = 0,
- .compatibility_version = 0,
- };
+ self.id = Id.default(try self.allocator.dupe(u8, self.name.?));
return;
};
const id_cmd = self.load_commands.items[index].Dylib;
@@ -150,18 +331,18 @@ pub fn parseId(self: *Dylib) !void {
};
}
-pub fn parseSymbols(self: *Dylib) !void {
+fn parseSymbols(self: *Dylib) !void {
const index = self.symtab_cmd_index orelse return;
const symtab_cmd = self.load_commands.items[index].Symtab;
var symtab = try self.allocator.alloc(u8, @sizeOf(macho.nlist_64) * symtab_cmd.nsyms);
defer self.allocator.free(symtab);
- _ = try self.file.?.preadAll(symtab, symtab_cmd.symoff);
+ _ = try self.file.?.preadAll(symtab, symtab_cmd.symoff + self.library_offset);
const slice = @alignCast(@alignOf(macho.nlist_64), mem.bytesAsSlice(macho.nlist_64, symtab));
var strtab = try self.allocator.alloc(u8, symtab_cmd.strsize);
defer self.allocator.free(strtab);
- _ = try self.file.?.preadAll(strtab, symtab_cmd.stroff);
+ _ = try self.file.?.preadAll(strtab, symtab_cmd.stroff + self.library_offset);
for (slice) |sym| {
const sym_name = mem.spanZ(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx));
@@ -169,23 +350,191 @@ pub fn parseSymbols(self: *Dylib) !void {
if (!(Symbol.isSect(sym) and Symbol.isExt(sym))) continue;
const name = try self.allocator.dupe(u8, sym_name);
- const proxy = try self.allocator.create(Symbol.Proxy);
- errdefer self.allocator.destroy(proxy);
+ try self.symbols.putNoClobber(self.allocator, name, {});
+ }
+}
- proxy.* = .{
- .base = .{
- .@"type" = .proxy,
- .name = name,
- },
- .dylib = self,
- };
+fn hasTarget(targets: []const []const u8, target: []const u8) bool {
+ for (targets) |t| {
+ if (mem.eql(u8, t, target)) return true;
+ }
+ return false;
+}
+
+fn addObjCClassSymbols(self: *Dylib, sym_name: []const u8) !void {
+ const expanded = &[_][]const u8{
+ try std.fmt.allocPrint(self.allocator, "_OBJC_CLASS_$_{s}", .{sym_name}),
+ try std.fmt.allocPrint(self.allocator, "_OBJC_METACLASS_$_{s}", .{sym_name}),
+ };
- try self.symbols.putNoClobber(self.allocator, name, &proxy.base);
+ for (expanded) |sym| {
+ if (self.symbols.contains(sym)) continue;
+ try self.symbols.putNoClobber(self.allocator, sym, .{});
}
}
-pub fn isDylib(file: fs.File) !bool {
- const header = try file.reader().readStruct(macho.mach_header_64);
- try file.seekTo(0);
- return header.filetype == macho.MH_DYLIB;
+pub fn parseFromStub(self: *Dylib, lib_stub: LibStub) !void {
+ if (lib_stub.inner.len == 0) return error.EmptyStubFile;
+
+ log.debug("parsing shared library from stub '{s}'", .{self.name.?});
+
+ const umbrella_lib = lib_stub.inner[0];
+
+ var id = Id.default(try self.allocator.dupe(u8, umbrella_lib.install_name));
+ if (umbrella_lib.current_version) |version| {
+ try id.parseCurrentVersion(version);
+ }
+ if (umbrella_lib.compatibility_version) |version| {
+ try id.parseCompatibilityVersion(version);
+ }
+ self.id = id;
+
+ const target_string: []const u8 = switch (self.arch.?) {
+ .aarch64 => "arm64-macos",
+ .x86_64 => "x86_64-macos",
+ else => unreachable,
+ };
+
+ var umbrella_libs = std.StringHashMap(void).init(self.allocator);
+ defer umbrella_libs.deinit();
+
+ for (lib_stub.inner) |stub, stub_index| {
+ if (!hasTarget(stub.targets, target_string)) continue;
+
+ if (stub_index > 0) {
+ // TODO I thought that we could switch on presence of `parent-umbrella` map;
+ // however, turns out `libsystem_notify.dylib` is fully reexported by `libSystem.dylib`
+ // BUT does not feature a `parent-umbrella` map as the only sublib. Apple's bug perhaps?
+ try umbrella_libs.put(stub.install_name, .{});
+ }
+
+ if (stub.exports) |exports| {
+ for (exports) |exp| {
+ if (!hasTarget(exp.targets, target_string)) continue;
+
+ if (exp.symbols) |symbols| {
+ for (symbols) |sym_name| {
+ if (self.symbols.contains(sym_name)) continue;
+ try self.symbols.putNoClobber(self.allocator, try self.allocator.dupe(u8, sym_name), {});
+ }
+ }
+
+ if (exp.objc_classes) |classes| {
+ for (classes) |sym_name| {
+ try self.addObjCClassSymbols(sym_name);
+ }
+ }
+ }
+ }
+
+ if (stub.reexports) |reexports| {
+ for (reexports) |reexp| {
+ if (!hasTarget(reexp.targets, target_string)) continue;
+
+ if (reexp.symbols) |symbols| {
+ for (symbols) |sym_name| {
+ if (self.symbols.contains(sym_name)) continue;
+ try self.symbols.putNoClobber(self.allocator, try self.allocator.dupe(u8, sym_name), {});
+ }
+ }
+
+ if (reexp.objc_classes) |classes| {
+ for (classes) |sym_name| {
+ try self.addObjCClassSymbols(sym_name);
+ }
+ }
+ }
+ }
+
+ if (stub.objc_classes) |classes| {
+ for (classes) |sym_name| {
+ try self.addObjCClassSymbols(sym_name);
+ }
+ }
+ }
+
+ log.debug("{s}", .{umbrella_lib.install_name});
+
+ // TODO track which libs were already parsed in different steps
+ for (lib_stub.inner) |stub| {
+ if (!hasTarget(stub.targets, target_string)) continue;
+
+ if (stub.reexported_libraries) |reexports| {
+ for (reexports) |reexp| {
+ if (!hasTarget(reexp.targets, target_string)) continue;
+
+ for (reexp.libraries) |lib| {
+ if (umbrella_libs.contains(lib)) {
+ log.debug(" | {s} <= {s}", .{ lib, umbrella_lib.install_name });
+ continue;
+ }
+
+ log.debug(" | {s}", .{lib});
+ try self.dependent_libs.put(self.allocator, try self.allocator.dupe(u8, lib), {});
+ }
+ }
+ }
+ }
+}
+
+pub fn parseDependentLibs(self: *Dylib, out: *std.ArrayList(*Dylib)) !void {
+ outer: for (self.dependent_libs.keys()) |lib| {
+ const dirname = fs.path.dirname(lib) orelse {
+ log.warn("unable to resolve dependency {s}", .{lib});
+ continue;
+ };
+ const filename = fs.path.basename(lib);
+ const without_ext = if (mem.lastIndexOfScalar(u8, filename, '.')) |index|
+ filename[0..index]
+ else
+ filename;
+
+ for (&[_][]const u8{ "dylib", "tbd" }) |ext| {
+ const with_ext = try std.fmt.allocPrint(self.allocator, "{s}.{s}", .{
+ without_ext,
+ ext,
+ });
+ defer self.allocator.free(with_ext);
+
+ const lib_path = if (self.syslibroot) |syslibroot|
+ try fs.path.join(self.allocator, &.{ syslibroot, dirname, with_ext })
+ else
+ try fs.path.join(self.allocator, &.{ dirname, with_ext });
+
+ log.debug("trying dependency at fully resolved path {s}", .{lib_path});
+
+ const dylibs = (try createAndParseFromPath(
+ self.allocator,
+ self.arch.?,
+ lib_path,
+ self.syslibroot,
+ )) orelse {
+ continue;
+ };
+
+ try out.appendSlice(dylibs);
+
+ continue :outer;
+ } else {
+ log.warn("unable to resolve dependency {s}", .{lib});
+ }
+ }
+}
+
+pub fn createProxy(self: *Dylib, sym_name: []const u8) !?*Symbol {
+ if (!self.symbols.contains(sym_name)) return null;
+
+ const name = try self.allocator.dupe(u8, sym_name);
+ const proxy = try self.allocator.create(Symbol.Proxy);
+ errdefer self.allocator.destroy(proxy);
+
+ proxy.* = .{
+ .base = .{
+ .@"type" = .proxy,
+ .name = name,
+ },
+ .file = self,
+ };
+
+ return &proxy.base;
}
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index c4a044b446..cb55dd1fd8 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -11,6 +11,7 @@ const mem = std.mem;
const reloc = @import("reloc.zig");
const Allocator = mem.Allocator;
+const Arch = std.Target.Cpu.Arch;
const Relocation = reloc.Relocation;
const Symbol = @import("Symbol.zig");
const parseName = @import("Zld.zig").parseName;
@@ -18,7 +19,7 @@ const parseName = @import("Zld.zig").parseName;
usingnamespace @import("commands.zig");
allocator: *Allocator,
-arch: ?std.Target.Cpu.Arch = null,
+arch: ?Arch = null,
header: ?macho.mach_header_64 = null,
file: ?fs.File = null,
file_offset: ?u32 = null,
@@ -93,7 +94,7 @@ pub const Section = struct {
pub fn isCode(self: Section) bool {
const attr = self.sectionAttrs();
- return attr & macho.S_ATTR_PURE_INSTRUCTIONS != 0 and attr & macho.S_ATTR_SOME_INSTRUCTIONS != 0;
+ return attr & macho.S_ATTR_PURE_INSTRUCTIONS != 0 or attr & macho.S_ATTR_SOME_INSTRUCTIONS != 0;
}
pub fn isDebug(self: Section) bool {
@@ -173,10 +174,36 @@ const DebugInfo = struct {
}
};
-pub fn init(allocator: *Allocator) Object {
- return .{
+pub fn createAndParseFromPath(allocator: *Allocator, arch: Arch, path: []const u8) !?*Object {
+ const file = fs.cwd().openFile(path, .{}) catch |err| switch (err) {
+ error.FileNotFound => return null,
+ else => |e| return e,
+ };
+ errdefer file.close();
+
+ const object = try allocator.create(Object);
+ errdefer allocator.destroy(object);
+
+ const name = try allocator.dupe(u8, path);
+ errdefer allocator.free(name);
+
+ object.* = .{
.allocator = allocator,
+ .arch = arch,
+ .name = name,
+ .file = file,
};
+
+ object.parse() catch |err| switch (err) {
+ error.EndOfStream, error.NotObject => {
+ object.deinit();
+ allocator.destroy(object);
+ return null;
+ },
+ else => |e| return e,
+ };
+
+ return object;
}
pub fn deinit(self: *Object) void {
@@ -220,14 +247,14 @@ pub fn parse(self: *Object) !void {
try reader.context.seekTo(offset);
}
- self.header = try reader.readStruct(macho.mach_header_64);
+ const header = try reader.readStruct(macho.mach_header_64);
- if (self.header.?.filetype != macho.MH_OBJECT) {
- log.err("invalid filetype: expected 0x{x}, found 0x{x}", .{ macho.MH_OBJECT, self.header.?.filetype });
- return error.MalformedObject;
+ if (header.filetype != macho.MH_OBJECT) {
+ log.debug("invalid filetype: expected 0x{x}, found 0x{x}", .{ macho.MH_OBJECT, header.filetype });
+ return error.NotObject;
}
- const this_arch: std.Target.Cpu.Arch = switch (self.header.?.cputype) {
+ const this_arch: Arch = switch (header.cputype) {
macho.CPU_TYPE_ARM64 => .aarch64,
macho.CPU_TYPE_X86_64 => .x86_64,
else => |value| {
@@ -240,6 +267,8 @@ pub fn parse(self: *Object) !void {
return error.MismatchedCpuArchitecture;
}
+ self.header = header;
+
try self.readLoadCommands(reader);
try self.parseSymbols();
try self.parseSections();
@@ -478,7 +507,6 @@ pub fn parseDebugInfo(self: *Object) !void {
self.tu_path = try std.fs.path.join(self.allocator, &[_][]const u8{ comp_dir, name });
self.tu_mtime = mtime: {
- var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const stat = try self.file.?.stat();
break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000));
};
@@ -534,9 +562,3 @@ pub fn parseDataInCode(self: *Object) !void {
try self.data_in_code_entries.append(self.allocator, dice);
}
}
-
-pub fn isObject(file: fs.File) !bool {
- const header = try file.reader().readStruct(macho.mach_header_64);
- try file.seekTo(0);
- return header.filetype == macho.MH_OBJECT;
-}
diff --git a/src/link/MachO/Symbol.zig b/src/link/MachO/Symbol.zig
index bb97acdf9f..c58af27672 100644
--- a/src/link/MachO/Symbol.zig
+++ b/src/link/MachO/Symbol.zig
@@ -84,10 +84,27 @@ pub const Regular = struct {
pub const Proxy = struct {
base: Symbol,
+ /// Dynamic binding info - spots within the final
+ /// executable where this proxy is referenced from.
+ bind_info: std.ArrayListUnmanaged(struct {
+ segment_id: u16,
+ address: u64,
+ }) = .{},
+
/// Dylib where to locate this symbol.
- dylib: ?*Dylib = null,
+ /// null means self-reference.
+ file: ?*Dylib = null,
pub const base_type: Symbol.Type = .proxy;
+
+ pub fn deinit(proxy: *Proxy, allocator: *Allocator) void {
+ proxy.bind_info.deinit(allocator);
+ }
+
+ pub fn dylibOrdinal(proxy: *Proxy) u16 {
+ const dylib = proxy.file orelse return 0;
+ return dylib.ordinal.?;
+ }
};
pub const Unresolved = struct {
@@ -116,6 +133,10 @@ pub const Tentative = struct {
pub fn deinit(base: *Symbol, allocator: *Allocator) void {
allocator.free(base.name);
+ switch (base.@"type") {
+ .proxy => @fieldParentPtr(Proxy, "base", base).deinit(allocator),
+ else => {},
+ }
}
pub fn cast(base: *Symbol, comptime T: type) ?*T {
diff --git a/src/link/MachO/Zld.zig b/src/link/MachO/Zld.zig
index dcaf86ed67..6cabdfa73e 100644
--- a/src/link/MachO/Zld.zig
+++ b/src/link/MachO/Zld.zig
@@ -32,12 +32,16 @@ out_path: ?[]const u8 = null,
// TODO these args will become obsolete once Zld is coalesced with the incremental
// linker.
+syslibroot: ?[]const u8 = null,
stack_size: u64 = 0,
objects: std.ArrayListUnmanaged(*Object) = .{},
archives: std.ArrayListUnmanaged(*Archive) = .{},
dylibs: std.ArrayListUnmanaged(*Dylib) = .{},
+libsystem_dylib_index: ?u16 = null,
+next_dylib_ordinal: u16 = 1,
+
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
pagezero_segment_cmd_index: ?u16 = null,
@@ -49,7 +53,6 @@ dyld_info_cmd_index: ?u16 = null,
symtab_cmd_index: ?u16 = null,
dysymtab_cmd_index: ?u16 = null,
dylinker_cmd_index: ?u16 = null,
-libsystem_cmd_index: ?u16 = null,
data_in_code_cmd_index: ?u16 = null,
function_starts_cmd_index: ?u16 = null,
main_cmd_index: ?u16 = null,
@@ -69,12 +72,21 @@ gcc_except_tab_section_index: ?u16 = null,
unwind_info_section_index: ?u16 = null,
eh_frame_section_index: ?u16 = null,
+objc_methlist_section_index: ?u16 = null,
+objc_methname_section_index: ?u16 = null,
+objc_methtype_section_index: ?u16 = null,
+objc_classname_section_index: ?u16 = null,
+
// __DATA_CONST segment sections
got_section_index: ?u16 = null,
mod_init_func_section_index: ?u16 = null,
mod_term_func_section_index: ?u16 = null,
data_const_section_index: ?u16 = null,
+objc_cfstring_section_index: ?u16 = null,
+objc_classlist_section_index: ?u16 = null,
+objc_imageinfo_section_index: ?u16 = null,
+
// __DATA segment sections
tlv_section_index: ?u16 = null,
tlv_data_section_index: ?u16 = null,
@@ -84,6 +96,11 @@ data_section_index: ?u16 = null,
bss_section_index: ?u16 = null,
common_section_index: ?u16 = null,
+objc_const_section_index: ?u16 = null,
+objc_selrefs_section_index: ?u16 = null,
+objc_classrefs_section_index: ?u16 = null,
+objc_data_section_index: ?u16 = null,
+
globals: std.StringArrayHashMapUnmanaged(*Symbol) = .{},
imports: std.StringArrayHashMapUnmanaged(*Symbol) = .{},
unresolved: std.StringArrayHashMapUnmanaged(*Symbol) = .{},
@@ -108,6 +125,7 @@ const TlvOffset = struct {
offset: u64,
fn cmp(context: void, a: TlvOffset, b: TlvOffset) bool {
+ _ = context;
return a.source_addr < b.source_addr;
}
};
@@ -115,10 +133,6 @@ const TlvOffset = struct {
/// Default path to dyld
const DEFAULT_DYLD_PATH: [*:0]const u8 = "/usr/lib/dyld";
-const LIB_SYSTEM_NAME: [*:0]const u8 = "System";
-/// TODO this should be inferred from included libSystem.tbd or similar.
-const LIB_SYSTEM_PATH: [*:0]const u8 = "/usr/lib/libSystem.B.dylib";
-
pub fn init(allocator: *Allocator) Zld {
return .{ .allocator = allocator };
}
@@ -152,9 +166,14 @@ pub fn deinit(self: *Zld) void {
}
self.dylibs.deinit(self.allocator);
+ for (self.imports.values()) |proxy| {
+ proxy.deinit(self.allocator);
+ self.allocator.destroy(proxy);
+ }
+ self.imports.deinit(self.allocator);
+
self.tentatives.deinit(self.allocator);
self.globals.deinit(self.allocator);
- self.imports.deinit(self.allocator);
self.unresolved.deinit(self.allocator);
self.strtab.deinit(self.allocator);
@@ -180,6 +199,7 @@ pub fn closeFiles(self: Zld) void {
const LinkArgs = struct {
libs: []const []const u8,
rpaths: []const []const u8,
+ libc_stub_path: []const u8,
};
pub fn link(self: *Zld, files: []const []const u8, out_path: []const u8, args: LinkArgs) !void {
@@ -218,185 +238,111 @@ pub fn link(self: *Zld, files: []const []const u8, out_path: []const u8, args: L
});
try self.populateMetadata();
- try self.addRpaths(args.rpaths);
try self.parseInputFiles(files);
try self.parseLibs(args.libs);
+ try self.parseLibSystem(args.libc_stub_path);
try self.resolveSymbols();
try self.resolveStubsAndGotEntries();
try self.updateMetadata();
try self.sortSections();
+ try self.addRpaths(args.rpaths);
+ try self.addDataInCodeLC();
+ try self.addCodeSignatureLC();
try self.allocateTextSegment();
try self.allocateDataConstSegment();
try self.allocateDataSegment();
self.allocateLinkeditSegment();
try self.allocateSymbols();
try self.allocateTentativeSymbols();
+ try self.allocateProxyBindAddresses();
try self.flush();
}
fn parseInputFiles(self: *Zld, files: []const []const u8) !void {
- const Input = struct {
- kind: enum {
- object,
- archive,
- dylib,
- },
- file: fs.File,
- name: []const u8,
- };
- var classified = std.ArrayList(Input).init(self.allocator);
- defer classified.deinit();
-
- // First, classify input files: object, archive or dylib.
for (files) |file_name| {
- const file = try fs.cwd().openFile(file_name, .{});
const full_path = full_path: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const path = try std.fs.realpath(file_name, &buffer);
break :full_path try self.allocator.dupe(u8, path);
};
- try_object: {
- if (!(try Object.isObject(file))) break :try_object;
- try classified.append(.{
- .kind = .object,
- .file = file,
- .name = full_path,
- });
+ if (try Object.createAndParseFromPath(self.allocator, self.arch.?, full_path)) |object| {
+ try self.objects.append(self.allocator, object);
continue;
}
- try_archive: {
- if (!(try Archive.isArchive(file))) break :try_archive;
- try classified.append(.{
- .kind = .archive,
- .file = file,
- .name = full_path,
- });
+ if (try Archive.createAndParseFromPath(self.allocator, self.arch.?, full_path)) |archive| {
+ try self.archives.append(self.allocator, archive);
continue;
}
- try_dylib: {
- if (!(try Dylib.isDylib(file))) break :try_dylib;
- try classified.append(.{
- .kind = .dylib,
- .file = file,
- .name = full_path,
- });
+ if (try Dylib.createAndParseFromPath(
+ self.allocator,
+ self.arch.?,
+ full_path,
+ self.syslibroot,
+ )) |dylibs| {
+ defer self.allocator.free(dylibs);
+ try self.dylibs.appendSlice(self.allocator, dylibs);
continue;
}
- file.close();
log.warn("unknown filetype for positional input file: '{s}'", .{file_name});
}
-
- // Based on our classification, proceed with parsing.
- for (classified.items) |input| {
- switch (input.kind) {
- .object => {
- const object = try self.allocator.create(Object);
- errdefer self.allocator.destroy(object);
-
- object.* = Object.init(self.allocator);
- object.arch = self.arch.?;
- object.name = input.name;
- object.file = input.file;
- try object.parse();
- try self.objects.append(self.allocator, object);
- },
- .archive => {
- const archive = try self.allocator.create(Archive);
- errdefer self.allocator.destroy(archive);
-
- archive.* = Archive.init(self.allocator);
- archive.arch = self.arch.?;
- archive.name = input.name;
- archive.file = input.file;
- try archive.parse();
- try self.archives.append(self.allocator, archive);
- },
- .dylib => {
- const dylib = try self.allocator.create(Dylib);
- errdefer self.allocator.destroy(dylib);
-
- dylib.* = Dylib.init(self.allocator);
- dylib.arch = self.arch.?;
- dylib.name = input.name;
- dylib.file = input.file;
-
- const ordinal = @intCast(u16, self.dylibs.items.len);
- dylib.ordinal = ordinal + 2; // TODO +2 since 1 is reserved for libSystem
-
- // TODO Defer parsing of the dylibs until they are actually needed
- try dylib.parse();
- try self.dylibs.append(self.allocator, dylib);
-
- // Add LC_LOAD_DYLIB command
- const dylib_id = dylib.id orelse unreachable;
- var dylib_cmd = try createLoadDylibCommand(
- self.allocator,
- dylib_id.name,
- dylib_id.timestamp,
- dylib_id.current_version,
- dylib_id.compatibility_version,
- );
- errdefer dylib_cmd.deinit(self.allocator);
-
- try self.load_commands.append(self.allocator, .{ .Dylib = dylib_cmd });
- },
- }
- }
}
fn parseLibs(self: *Zld, libs: []const []const u8) !void {
for (libs) |lib| {
- const file = try fs.cwd().openFile(lib, .{});
-
- if (try Dylib.isDylib(file)) {
- const dylib = try self.allocator.create(Dylib);
- errdefer self.allocator.destroy(dylib);
-
- dylib.* = Dylib.init(self.allocator);
- dylib.arch = self.arch.?;
- dylib.name = try self.allocator.dupe(u8, lib);
- dylib.file = file;
-
- const ordinal = @intCast(u16, self.dylibs.items.len);
- dylib.ordinal = ordinal + 2; // TODO +2 since 1 is reserved for libSystem
-
- // TODO Defer parsing of the dylibs until they are actually needed
- try dylib.parse();
- try self.dylibs.append(self.allocator, dylib);
-
- // Add LC_LOAD_DYLIB command
- const dylib_id = dylib.id orelse unreachable;
- var dylib_cmd = try createLoadDylibCommand(
- self.allocator,
- dylib_id.name,
- dylib_id.timestamp,
- dylib_id.current_version,
- dylib_id.compatibility_version,
- );
- errdefer dylib_cmd.deinit(self.allocator);
-
- try self.load_commands.append(self.allocator, .{ .Dylib = dylib_cmd });
- } else if (try Archive.isArchive(file)) {
- const archive = try self.allocator.create(Archive);
- errdefer self.allocator.destroy(archive);
-
- archive.* = Archive.init(self.allocator);
- archive.arch = self.arch.?;
- archive.name = try self.allocator.dupe(u8, lib);
- archive.file = file;
- try archive.parse();
+ if (try Dylib.createAndParseFromPath(
+ self.allocator,
+ self.arch.?,
+ lib,
+ self.syslibroot,
+ )) |dylibs| {
+ defer self.allocator.free(dylibs);
+ try self.dylibs.appendSlice(self.allocator, dylibs);
+ continue;
+ }
+
+ if (try Archive.createAndParseFromPath(self.allocator, self.arch.?, lib)) |archive| {
try self.archives.append(self.allocator, archive);
- } else {
- file.close();
- log.warn("unknown filetype for a library: '{s}'", .{lib});
+ continue;
}
+
+ log.warn("unknown filetype for a library: '{s}'", .{lib});
}
}
+fn parseLibSystem(self: *Zld, libc_stub_path: []const u8) !void {
+ const dylibs = (try Dylib.createAndParseFromPath(
+ self.allocator,
+ self.arch.?,
+ libc_stub_path,
+ self.syslibroot,
+ )) orelse return error.FailedToParseLibSystem;
+ defer self.allocator.free(dylibs);
+
+ assert(dylibs.len == 1); // More than one dylib output from parsing libSystem!
+ const dylib = dylibs[0];
+
+ self.libsystem_dylib_index = @intCast(u16, self.dylibs.items.len);
+ try self.dylibs.append(self.allocator, dylib);
+
+ // Add LC_LOAD_DYLIB load command.
+ dylib.ordinal = self.next_dylib_ordinal;
+ const dylib_id = dylib.id orelse unreachable;
+ var dylib_cmd = try createLoadDylibCommand(
+ self.allocator,
+ dylib_id.name,
+ dylib_id.timestamp,
+ dylib_id.current_version,
+ dylib_id.compatibility_version,
+ );
+ errdefer dylib_cmd.deinit(self.allocator);
+ try self.load_commands.append(self.allocator, .{ .Dylib = dylib_cmd });
+ self.next_dylib_ordinal += 1;
+}
+
fn mapAndUpdateSections(
self: *Zld,
object: *Object,
@@ -421,6 +367,7 @@ fn mapAndUpdateSections(
offset,
offset + size,
});
+ log.debug(" | flags 0x{x}", .{source_sect.inner.flags});
source_sect.target_map = .{
.segment_id = target_seg_id,
@@ -432,429 +379,22 @@ fn mapAndUpdateSections(
fn updateMetadata(self: *Zld) !void {
for (self.objects.items) |object| {
- const object_seg = object.load_commands.items[object.segment_cmd_index.?].Segment;
- const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
- const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
- const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
-
- // Create missing metadata
+ // Find ideal section alignment and update section mappings
for (object.sections.items) |sect, sect_id| {
- const segname = sect.segname();
- const sectname = sect.sectname();
-
- switch (sect.sectionType()) {
- macho.S_4BYTE_LITERALS, macho.S_8BYTE_LITERALS, macho.S_16BYTE_LITERALS, macho.S_LITERAL_POINTERS => {
- if (self.text_const_section_index != null) continue;
-
- self.text_const_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__const"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_CSTRING_LITERALS => {
- if (self.cstring_section_index != null) continue;
-
- self.cstring_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__cstring"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_CSTRING_LITERALS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_MOD_INIT_FUNC_POINTERS => {
- if (self.mod_init_func_section_index != null) continue;
-
- self.mod_init_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
- try data_const_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__mod_init_func"),
- .segname = makeStaticString("__DATA_CONST"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_MOD_INIT_FUNC_POINTERS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_MOD_TERM_FUNC_POINTERS => {
- if (self.mod_term_func_section_index != null) continue;
-
- self.mod_term_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
- try data_const_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__mod_term_func"),
- .segname = makeStaticString("__DATA_CONST"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_MOD_TERM_FUNC_POINTERS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_ZEROFILL => {
- if (mem.eql(u8, sectname, "__common")) {
- if (self.common_section_index != null) continue;
-
- self.common_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__common"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_ZEROFILL,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- } else {
- if (self.bss_section_index != null) continue;
-
- self.bss_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__bss"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_ZEROFILL,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- }
- continue;
- },
- macho.S_THREAD_LOCAL_VARIABLES => {
- if (self.tlv_section_index != null) continue;
-
- self.tlv_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__thread_vars"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_THREAD_LOCAL_VARIABLES,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_THREAD_LOCAL_REGULAR => {
- if (self.tlv_data_section_index != null) continue;
-
- self.tlv_data_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__thread_data"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_THREAD_LOCAL_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_THREAD_LOCAL_ZEROFILL => {
- if (self.tlv_bss_section_index != null) continue;
-
- self.tlv_bss_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__thread_bss"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_THREAD_LOCAL_ZEROFILL,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_COALESCED => {
- if (mem.eql(u8, "__TEXT", segname) and mem.eql(u8, "__eh_frame", sectname)) {
- // TODO I believe __eh_frame is currently part of __unwind_info section
- // in the latest ld64 output.
- if (self.eh_frame_section_index != null) continue;
-
- self.eh_frame_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__eh_frame"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- }
-
- // TODO audit this: is this the right mapping?
- if (self.data_const_section_index != null) continue;
-
- self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
- try data_const_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__const"),
- .segname = makeStaticString("__DATA_CONST"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- },
- macho.S_REGULAR => {
- if (sect.isCode()) {
- if (self.text_section_index != null) continue;
-
- self.text_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__text"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- }
-
- if (sect.isDebug()) {
- if (mem.eql(u8, "__LD", segname) and mem.eql(u8, "__compact_unwind", sectname)) {
- log.debug("TODO compact unwind section: type 0x{x}, name '{s},{s}'", .{
- sect.flags(), segname, sectname,
- });
- }
- continue;
- }
-
- if (mem.eql(u8, segname, "__TEXT")) {
- if (mem.eql(u8, sectname, "__ustring")) {
- if (self.ustring_section_index != null) continue;
-
- self.ustring_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__ustring"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- } else if (mem.eql(u8, sectname, "__gcc_except_tab")) {
- if (self.gcc_except_tab_section_index != null) continue;
-
- self.gcc_except_tab_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__gcc_except_tab"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- } else {
- if (self.text_const_section_index != null) continue;
-
- self.text_const_section_index = @intCast(u16, text_seg.sections.items.len);
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__const"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- }
- continue;
- }
-
- if (mem.eql(u8, segname, "__DATA_CONST")) {
- if (self.data_const_section_index != null) continue;
-
- self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
- try data_const_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__const"),
- .segname = makeStaticString("__DATA_CONST"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- continue;
- }
-
- if (mem.eql(u8, segname, "__DATA")) {
- if (mem.eql(u8, sectname, "__const")) {
- if (self.data_const_section_index != null) continue;
-
- self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
- try data_const_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__const"),
- .segname = makeStaticString("__DATA_CONST"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- } else {
- if (self.data_section_index != null) continue;
-
- self.data_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__data"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
- }
-
- continue;
- }
-
- if (mem.eql(u8, "__LLVM", segname) and mem.eql(u8, "__asm", sectname)) {
- log.debug("TODO LLVM asm section: type 0x{x}, name '{s},{s}'", .{
- sect.flags(), segname, sectname,
- });
- continue;
- }
- },
- else => {},
- }
-
- log.err("{s}: unhandled section type 0x{x} for '{s},{s}'", .{
- object.name.?,
- sect.flags(),
- segname,
- sectname,
- });
- return error.UnhandledSection;
- }
-
- // Find ideal section alignment.
- for (object.sections.items) |sect| {
- if (self.getMatchingSection(sect)) |res| {
- const target_seg = &self.load_commands.items[res.seg].Segment;
- const target_sect = &target_seg.sections.items[res.sect];
- target_sect.@"align" = math.max(target_sect.@"align", sect.inner.@"align");
- }
- }
-
- // Update section mappings
- for (object.sections.items) |sect, sect_id| {
- if (self.getMatchingSection(sect)) |res| {
- try self.mapAndUpdateSections(object, @intCast(u16, sect_id), res.seg, res.sect);
+ const match = (try self.getMatchingSection(sect)) orelse {
+ log.debug("{s}: unhandled section type 0x{x} for '{s},{s}'", .{
+ object.name.?,
+ sect.flags(),
+ sect.segname(),
+ sect.sectname(),
+ });
continue;
- }
- log.debug("section '{s},{s}' will be unmapped", .{ sect.segname(), sect.sectname() });
+ };
+ const target_seg = &self.load_commands.items[match.seg].Segment;
+ const target_sect = &target_seg.sections.items[match.sect];
+ target_sect.@"align" = math.max(target_sect.@"align", sect.inner.@"align");
+
+ try self.mapAndUpdateSections(object, @intCast(u16, sect_id), match.seg, match.sect);
}
}
@@ -864,19 +404,8 @@ fn updateMetadata(self: *Zld) !void {
const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
const common_section_index = self.common_section_index orelse ind: {
self.common_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__common"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
+ try data_seg.addSection(self.allocator, "__common", .{
.flags = macho.S_ZEROFILL,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
break :ind self.common_section_index.?;
};
@@ -950,31 +479,116 @@ const MatchingSection = struct {
sect: u16,
};
-fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
+fn getMatchingSection(self: *Zld, sect: Object.Section) !?MatchingSection {
+ const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
+ const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
+ const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
const segname = sect.segname();
const sectname = sect.sectname();
const res: ?MatchingSection = blk: {
switch (sect.sectionType()) {
- macho.S_4BYTE_LITERALS, macho.S_8BYTE_LITERALS, macho.S_16BYTE_LITERALS, macho.S_LITERAL_POINTERS => {
+ macho.S_4BYTE_LITERALS, macho.S_8BYTE_LITERALS, macho.S_16BYTE_LITERALS => {
+ if (self.text_const_section_index == null) {
+ self.text_const_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__const", .{});
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.text_const_section_index.?,
};
},
macho.S_CSTRING_LITERALS => {
+ if (mem.eql(u8, sectname, "__objc_methname")) {
+ // TODO it seems the common values within the sections in objects are deduplicated/merged
+ // on merging the sections' contents.
+ if (self.objc_methname_section_index == null) {
+ self.objc_methname_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__objc_methname", .{
+ .flags = macho.S_CSTRING_LITERALS,
+ });
+ }
+
+ break :blk .{
+ .seg = self.text_segment_cmd_index.?,
+ .sect = self.objc_methname_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_methtype")) {
+ if (self.objc_methtype_section_index == null) {
+ self.objc_methtype_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__objc_methtype", .{
+ .flags = macho.S_CSTRING_LITERALS,
+ });
+ }
+
+ break :blk .{
+ .seg = self.text_segment_cmd_index.?,
+ .sect = self.objc_methtype_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_classname")) {
+ if (self.objc_classname_section_index == null) {
+ self.objc_classname_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__objc_classname", .{});
+ }
+
+ break :blk .{
+ .seg = self.text_segment_cmd_index.?,
+ .sect = self.objc_classname_section_index.?,
+ };
+ }
+
+ if (self.cstring_section_index == null) {
+ self.cstring_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__cstring", .{
+ .flags = macho.S_CSTRING_LITERALS,
+ });
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.cstring_section_index.?,
};
},
+ macho.S_LITERAL_POINTERS => {
+ if (mem.eql(u8, segname, "__DATA") and mem.eql(u8, sectname, "__objc_selrefs")) {
+ if (self.objc_selrefs_section_index == null) {
+ self.objc_selrefs_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__objc_selrefs", .{
+ .flags = macho.S_LITERAL_POINTERS,
+ });
+ }
+
+ break :blk .{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.objc_selrefs_section_index.?,
+ };
+ }
+
+ // TODO investigate
+ break :blk null;
+ },
macho.S_MOD_INIT_FUNC_POINTERS => {
+ if (self.mod_init_func_section_index == null) {
+ self.mod_init_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__mod_init_func", .{
+ .flags = macho.S_MOD_INIT_FUNC_POINTERS,
+ });
+ }
+
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.mod_init_func_section_index.?,
};
},
macho.S_MOD_TERM_FUNC_POINTERS => {
+ if (self.mod_term_func_section_index == null) {
+ self.mod_term_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__mod_term_func", .{
+ .flags = macho.S_MOD_TERM_FUNC_POINTERS,
+ });
+ }
+
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.mod_term_func_section_index.?,
@@ -982,11 +596,25 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
},
macho.S_ZEROFILL => {
if (mem.eql(u8, sectname, "__common")) {
+ if (self.common_section_index == null) {
+ self.common_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__common", .{
+ .flags = macho.S_ZEROFILL,
+ });
+ }
+
break :blk .{
.seg = self.data_segment_cmd_index.?,
.sect = self.common_section_index.?,
};
} else {
+ if (self.bss_section_index == null) {
+ self.bss_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__bss", .{
+ .flags = macho.S_ZEROFILL,
+ });
+ }
+
break :blk .{
.seg = self.data_segment_cmd_index.?,
.sect = self.bss_section_index.?,
@@ -994,18 +622,39 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
}
},
macho.S_THREAD_LOCAL_VARIABLES => {
+ if (self.tlv_section_index == null) {
+ self.tlv_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__thread_vars", .{
+ .flags = macho.S_THREAD_LOCAL_VARIABLES,
+ });
+ }
+
break :blk .{
.seg = self.data_segment_cmd_index.?,
.sect = self.tlv_section_index.?,
};
},
macho.S_THREAD_LOCAL_REGULAR => {
+ if (self.tlv_data_section_index == null) {
+ self.tlv_data_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__thread_data", .{
+ .flags = macho.S_THREAD_LOCAL_REGULAR,
+ });
+ }
+
break :blk .{
.seg = self.data_segment_cmd_index.?,
.sect = self.tlv_data_section_index.?,
};
},
macho.S_THREAD_LOCAL_ZEROFILL => {
+ if (self.tlv_bss_section_index == null) {
+ self.tlv_bss_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__thread_bss", .{
+ .flags = macho.S_THREAD_LOCAL_ZEROFILL,
+ });
+ }
+
break :blk .{
.seg = self.data_segment_cmd_index.?,
.sect = self.tlv_bss_section_index.?,
@@ -1013,12 +662,25 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
},
macho.S_COALESCED => {
if (mem.eql(u8, "__TEXT", segname) and mem.eql(u8, "__eh_frame", sectname)) {
+ // TODO I believe __eh_frame is currently part of __unwind_info section
+ // in the latest ld64 output.
+ if (self.eh_frame_section_index == null) {
+ self.eh_frame_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__eh_frame", .{});
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.eh_frame_section_index.?,
};
}
+ // TODO audit this: is this the right mapping?
+ if (self.data_const_section_index == null) {
+ self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__const", .{});
+ }
+
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.data_const_section_index.?,
@@ -1026,6 +688,13 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
},
macho.S_REGULAR => {
if (sect.isCode()) {
+ if (self.text_section_index == null) {
+ self.text_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__text", .{
+ .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
+ });
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.text_section_index.?,
@@ -1033,21 +702,66 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
}
if (sect.isDebug()) {
// TODO debug attributes
+ if (mem.eql(u8, "__LD", segname) and mem.eql(u8, "__compact_unwind", sectname)) {
+ log.debug("TODO compact unwind section: type 0x{x}, name '{s},{s}'", .{
+ sect.flags(), segname, sectname,
+ });
+ }
break :blk null;
}
if (mem.eql(u8, segname, "__TEXT")) {
if (mem.eql(u8, sectname, "__ustring")) {
+ if (self.ustring_section_index == null) {
+ self.ustring_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__ustring", .{});
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.ustring_section_index.?,
};
} else if (mem.eql(u8, sectname, "__gcc_except_tab")) {
+ if (self.gcc_except_tab_section_index == null) {
+ self.gcc_except_tab_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__gcc_except_tab", .{});
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.gcc_except_tab_section_index.?,
};
+ } else if (mem.eql(u8, sectname, "__objc_methlist")) {
+ if (self.objc_methlist_section_index == null) {
+ self.objc_methlist_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__objc_methlist", .{});
+ }
+
+ break :blk .{
+ .seg = self.text_segment_cmd_index.?,
+ .sect = self.objc_methlist_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__rodata") or
+ mem.eql(u8, sectname, "__typelink") or
+ mem.eql(u8, sectname, "__itablink") or
+ mem.eql(u8, sectname, "__gosymtab") or
+ mem.eql(u8, sectname, "__gopclntab"))
+ {
+ if (self.data_const_section_index == null) {
+ self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__const", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_const_segment_cmd_index.?,
+ .sect = self.data_const_section_index.?,
+ };
} else {
+ if (self.text_const_section_index == null) {
+ self.text_const_section_index = @intCast(u16, text_seg.sections.items.len);
+ try text_seg.addSection(self.allocator, "__const", .{});
+ }
+
break :blk .{
.seg = self.text_segment_cmd_index.?,
.sect = self.text_const_section_index.?,
@@ -1056,6 +770,11 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
}
if (mem.eql(u8, segname, "__DATA_CONST")) {
+ if (self.data_const_section_index == null) {
+ self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__const", .{});
+ }
+
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.data_const_section_index.?,
@@ -1064,15 +783,92 @@ fn getMatchingSection(self: *Zld, sect: Object.Section) ?MatchingSection {
if (mem.eql(u8, segname, "__DATA")) {
if (mem.eql(u8, sectname, "__const")) {
+ if (self.data_const_section_index == null) {
+ self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__const", .{});
+ }
+
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.data_const_section_index.?,
};
+ } else if (mem.eql(u8, sectname, "__cfstring")) {
+ if (self.objc_cfstring_section_index == null) {
+ self.objc_cfstring_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__cfstring", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_const_segment_cmd_index.?,
+ .sect = self.objc_cfstring_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_classlist")) {
+ if (self.objc_classlist_section_index == null) {
+ self.objc_classlist_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__objc_classlist", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_const_segment_cmd_index.?,
+ .sect = self.objc_classlist_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_imageinfo")) {
+ if (self.objc_imageinfo_section_index == null) {
+ self.objc_imageinfo_section_index = @intCast(u16, data_const_seg.sections.items.len);
+ try data_const_seg.addSection(self.allocator, "__objc_imageinfo", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_const_segment_cmd_index.?,
+ .sect = self.objc_imageinfo_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_const")) {
+ if (self.objc_const_section_index == null) {
+ self.objc_const_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__objc_const", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.objc_const_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_classrefs")) {
+ if (self.objc_classrefs_section_index == null) {
+ self.objc_classrefs_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__objc_classrefs", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.objc_classrefs_section_index.?,
+ };
+ } else if (mem.eql(u8, sectname, "__objc_data")) {
+ if (self.objc_data_section_index == null) {
+ self.objc_data_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__objc_data", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.objc_data_section_index.?,
+ };
+ } else {
+ if (self.data_section_index == null) {
+ self.data_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, "__data", .{});
+ }
+
+ break :blk .{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.data_section_index.?,
+ };
}
- break :blk .{
- .seg = self.data_segment_cmd_index.?,
- .sect = self.data_section_index.?,
- };
+ }
+
+ if (mem.eql(u8, "__LLVM", segname) and mem.eql(u8, "__asm", sectname)) {
+ log.debug("TODO LLVM asm section: type 0x{x}, name '{s},{s}'", .{
+ sect.flags(), segname, sectname,
+ });
}
break :blk null;
@@ -1107,6 +903,9 @@ fn sortSections(self: *Zld) !void {
&self.cstring_section_index,
&self.ustring_section_index,
&self.text_const_section_index,
+ &self.objc_methname_section_index,
+ &self.objc_methtype_section_index,
+ &self.objc_classname_section_index,
&self.eh_frame_section_index,
};
for (indices) |maybe_index| {
@@ -1132,6 +931,9 @@ fn sortSections(self: *Zld) !void {
&self.mod_init_func_section_index,
&self.mod_term_func_section_index,
&self.data_const_section_index,
+ &self.objc_cfstring_section_index,
+ &self.objc_classlist_section_index,
+ &self.objc_imageinfo_section_index,
};
for (indices) |maybe_index| {
const new_index: u16 = if (maybe_index.*) |index| blk: {
@@ -1154,6 +956,10 @@ fn sortSections(self: *Zld) !void {
// __DATA segment
const indices = &[_]*?u16{
&self.la_symbol_ptr_section_index,
+ &self.objc_const_section_index,
+ &self.objc_selrefs_section_index,
+ &self.objc_classrefs_section_index,
+ &self.objc_data_section_index,
&self.data_section_index,
&self.tlv_section_index,
&self.tlv_data_section_index,
@@ -1294,7 +1100,6 @@ fn allocateLinkeditSegment(self: *Zld) void {
}
fn allocateSegment(self: *Zld, index: u16, offset: u64) !void {
- const base_vmaddr = self.load_commands.items[self.pagezero_segment_cmd_index.?].Segment.inner.vmsize;
const seg = &self.load_commands.items[index].Segment;
// Allocate the sections according to their alignment at the beginning of the segment.
@@ -1375,7 +1180,7 @@ fn allocateTentativeSymbols(self: *Zld) !void {
}
// Convert tentative definitions into regular symbols.
- for (self.tentatives.values()) |sym, i| {
+ for (self.tentatives.values()) |sym| {
const tent = sym.cast(Symbol.Tentative) orelse unreachable;
const reg = try self.allocator.create(Symbol.Regular);
errdefer self.allocator.destroy(reg);
@@ -1420,6 +1225,31 @@ fn allocateTentativeSymbols(self: *Zld) !void {
}
}
+fn allocateProxyBindAddresses(self: *Zld) !void {
+ for (self.objects.items) |object| {
+ for (object.sections.items) |sect| {
+ const relocs = sect.relocs orelse continue;
+
+ for (relocs) |rel| {
+ if (rel.@"type" != .unsigned) continue; // GOT is currently special-cased
+ if (rel.target != .symbol) continue;
+
+ const sym = rel.target.symbol.getTopmostAlias();
+ if (sym.cast(Symbol.Proxy)) |proxy| {
+ const target_map = sect.target_map orelse continue;
+ const target_seg = self.load_commands.items[target_map.segment_id].Segment;
+ const target_sect = target_seg.sections.items[target_map.section_id];
+
+ try proxy.bind_info.append(self.allocator, .{
+ .segment_id = target_map.segment_id,
+ .address = target_sect.addr + target_map.offset + rel.offset,
+ });
+ }
+ }
+ }
+ }
+}
+
fn writeStubHelperCommon(self: *Zld) !void {
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const stub_helper = &text_segment.sections.items[self.stub_helper_section_index.?];
@@ -1427,7 +1257,6 @@ fn writeStubHelperCommon(self: *Zld) !void {
const got = &data_const_segment.sections.items[self.got_section_index.?];
const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
const data = &data_segment.sections.items[self.data_section_index.?];
- const la_symbol_ptr = data_segment.sections.items[self.la_symbol_ptr_section_index.?];
self.stub_helper_stubs_start_off = blk: {
switch (self.arch.?) {
@@ -1761,7 +1590,7 @@ fn resolveSymbolsInObject(self: *Zld, object: *Object) !void {
t_sym.alias = sym;
sym_ptr.* = sym;
- } else if (sym.cast(Symbol.Unresolved)) |und| {
+ } else if (sym.cast(Symbol.Unresolved)) |_| {
if (self.globals.get(sym.name)) |g_sym| {
sym.alias = g_sym;
continue;
@@ -1816,9 +1645,8 @@ fn resolveSymbols(self: *Zld) !void {
next_sym += 1;
}
}
+
// Third pass, resolve symbols in dynamic libraries.
- // TODO Implement libSystem as a hard-coded library, or ship with
- // a libSystem.B.tbd definition file?
var unresolved = std.ArrayList(*Symbol).init(self.allocator);
defer unresolved.deinit();
@@ -1826,61 +1654,84 @@ fn resolveSymbols(self: *Zld) !void {
for (self.unresolved.values()) |value| {
unresolved.appendAssumeCapacity(value);
}
- self.unresolved.clearAndFree(self.allocator);
+ self.unresolved.clearRetainingCapacity();
- var has_undefined = false;
- while (unresolved.popOrNull()) |undef| {
- var found = false;
- for (self.dylibs.items) |dylib| {
- const proxy = dylib.symbols.get(undef.name) orelse continue;
- try self.imports.putNoClobber(self.allocator, proxy.name, proxy);
- undef.alias = proxy;
- found = true;
- }
+ var referenced = std.AutoHashMap(*Dylib, void).init(self.allocator);
+ defer referenced.deinit();
- if (!found) {
- // TODO we currently hardcode all unresolved symbols to libSystem
- const proxy = try self.allocator.create(Symbol.Proxy);
- errdefer self.allocator.destroy(proxy);
+ loop: while (unresolved.popOrNull()) |undef| {
+ const proxy = self.imports.get(undef.name) orelse outer: {
+ const proxy = inner: {
+ for (self.dylibs.items) |dylib, i| {
+ const proxy = (try dylib.createProxy(undef.name)) orelse continue;
+ if (self.libsystem_dylib_index.? != @intCast(u16, i)) { // LibSystem gets load command seperately.
+ try referenced.put(dylib, {});
+ }
+ break :inner proxy;
+ }
+ if (mem.eql(u8, undef.name, "___dso_handle")) {
+ // TODO this is just a temp patch until I work out what to actually
+ // do with ___dso_handle and __mh_execute_header symbols which are
+ // synthetically created by the linker on macOS.
+ const name = try self.allocator.dupe(u8, undef.name);
+ const proxy = try self.allocator.create(Symbol.Proxy);
+ errdefer self.allocator.destroy(proxy);
+ proxy.* = .{
+ .base = .{
+ .@"type" = .proxy,
+ .name = name,
+ },
+ .file = null,
+ };
+ break :inner &proxy.base;
+ }
- proxy.* = .{
- .base = .{
- .@"type" = .proxy,
- .name = try self.allocator.dupe(u8, undef.name),
- },
- .dylib = null, // TODO null means libSystem
+ self.unresolved.putAssumeCapacityNoClobber(undef.name, undef);
+ continue :loop;
};
- try self.imports.putNoClobber(self.allocator, proxy.base.name, &proxy.base);
- undef.alias = &proxy.base;
+ try self.imports.putNoClobber(self.allocator, proxy.name, proxy);
+ break :outer proxy;
+ };
+ undef.alias = proxy;
+ }
+
+ // Add LC_LOAD_DYLIB load command for each referenced dylib/stub.
+ var it = referenced.iterator();
+ while (it.next()) |entry| {
+ const dylib = entry.key_ptr.*;
+ dylib.ordinal = self.next_dylib_ordinal;
+ const dylib_id = dylib.id orelse unreachable;
+ var dylib_cmd = try createLoadDylibCommand(
+ self.allocator,
+ dylib_id.name,
+ dylib_id.timestamp,
+ dylib_id.current_version,
+ dylib_id.compatibility_version,
+ );
+ errdefer dylib_cmd.deinit(self.allocator);
+ try self.load_commands.append(self.allocator, .{ .Dylib = dylib_cmd });
+ self.next_dylib_ordinal += 1;
+ }
- // log.err("undefined reference to symbol '{s}'", .{undef.name});
- // log.err(" | referenced in {s}", .{
- // undef.cast(Symbol.Unresolved).?.file.name.?,
- // });
- // has_undefined = true;
+ if (self.unresolved.count() > 0) {
+ for (self.unresolved.values()) |undef| {
+ log.err("undefined reference to symbol '{s}'", .{undef.name});
+ log.err(" | referenced in {s}", .{
+ undef.cast(Symbol.Unresolved).?.file.name.?,
+ });
}
- }
- if (has_undefined) return error.UndefinedSymbolReference;
+ return error.UndefinedSymbolReference;
+ }
// Finally put dyld_stub_binder as an Import
- const dyld_stub_binder = try self.allocator.create(Symbol.Proxy);
- errdefer self.allocator.destroy(dyld_stub_binder);
-
- dyld_stub_binder.* = .{
- .base = .{
- .@"type" = .proxy,
- .name = try self.allocator.dupe(u8, "dyld_stub_binder"),
- },
- .dylib = null, // TODO null means libSystem
+ const libsystem_dylib = self.dylibs.items[self.libsystem_dylib_index.?];
+ const proxy = (try libsystem_dylib.createProxy("dyld_stub_binder")) orelse {
+ log.err("undefined reference to symbol 'dyld_stub_binder'", .{});
+ return error.UndefinedSymbolReference;
};
-
- try self.imports.putNoClobber(
- self.allocator,
- dyld_stub_binder.base.name,
- &dyld_stub_binder.base,
- );
+ try self.imports.putNoClobber(self.allocator, proxy.name, proxy);
}
fn resolveStubsAndGotEntries(self: *Zld) !void {
@@ -1891,6 +1742,7 @@ fn resolveStubsAndGotEntries(self: *Zld) !void {
const relocs = sect.relocs orelse continue;
for (relocs) |rel| {
switch (rel.@"type") {
+ .unsigned => continue,
.got_page, .got_page_off, .got_load, .got, .pointer_to_got => {
const sym = rel.target.symbol.getTopmostAlias();
if (sym.got_index != null) continue;
@@ -1976,29 +1828,52 @@ fn resolveRelocsAndWriteSections(self: *Zld) !void {
args.source_target_sect_addr = source_sect.inner.addr;
}
- rebases: {
- var hit: bool = false;
- if (target_map.segment_id == self.data_segment_cmd_index.?) {
- if (self.data_section_index) |index| {
- if (index == target_map.section_id) hit = true;
+ const flags = @truncate(u8, target_sect.flags & 0xff);
+ const should_rebase = rebase: {
+ if (!unsigned.is_64bit) break :rebase false;
+
+ // TODO actually, a check similar to what dyld is doing, that is, verifying
+ // that the segment is writable should be enough here.
+ const is_right_segment = blk: {
+ if (self.data_segment_cmd_index) |idx| {
+ if (target_map.segment_id == idx) {
+ break :blk true;
+ }
+ }
+ if (self.data_const_segment_cmd_index) |idx| {
+ if (target_map.segment_id == idx) {
+ break :blk true;
+ }
}
+ break :blk false;
+ };
+
+ if (!is_right_segment) break :rebase false;
+ if (flags != macho.S_LITERAL_POINTERS and
+ flags != macho.S_REGULAR)
+ {
+ break :rebase false;
}
- if (target_map.segment_id == self.data_const_segment_cmd_index.?) {
- if (self.data_const_section_index) |index| {
- if (index == target_map.section_id) hit = true;
+ if (rel.target == .symbol) {
+ const final = rel.target.symbol.getTopmostAlias();
+ if (final.cast(Symbol.Proxy)) |_| {
+ break :rebase false;
}
}
- if (!hit) break :rebases;
+ break :rebase true;
+ };
+ if (should_rebase) {
try self.local_rebases.append(self.allocator, .{
.offset = source_addr - target_seg.inner.vmaddr,
.segment_id = target_map.segment_id,
});
}
+
// TLV is handled via a separate offset mechanism.
// Calculate the offset to the initializer.
- if (target_sect.flags == macho.S_THREAD_LOCAL_VARIABLES) tlv: {
+ if (flags == macho.S_THREAD_LOCAL_VARIABLES) tlv: {
// TODO we don't want to save offset to tlv_bootstrap
if (mem.eql(u8, rel.target.symbol.name, "__tlv_bootstrap")) break :tlv;
@@ -2095,7 +1970,13 @@ fn relocTargetAddr(self: *Zld, object: *const Object, target: reloc.Relocation.T
const segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const stubs = segment.sections.items[self.stubs_section_index.?];
const stubs_index = proxy.base.stubs_index orelse {
- log.err("expected stubs index when relocating symbol '{s}'", .{final.name});
+ if (proxy.bind_info.items.len > 0) {
+ break :blk 0; // Dynamically bound by dyld.
+ }
+ log.err(
+ "expected stubs index or dynamic bind address when relocating symbol '{s}'",
+ .{final.name},
+ );
log.err("this is an internal linker error", .{});
return error.FailedToResolveRelocationTarget;
};
@@ -2127,18 +2008,8 @@ fn populateMetadata(self: *Zld) !void {
if (self.pagezero_segment_cmd_index == null) {
self.pagezero_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__PAGEZERO"),
- .vmaddr = 0,
+ .Segment = SegmentCommand.empty("__PAGEZERO", .{
.vmsize = 0x100000000, // size always set to 4GB
- .fileoff = 0,
- .filesize = 0,
- .maxprot = 0,
- .initprot = 0,
- .nsects = 0,
- .flags = 0,
}),
});
}
@@ -2146,18 +2017,10 @@ fn populateMetadata(self: *Zld) !void {
if (self.text_segment_cmd_index == null) {
self.text_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__TEXT"),
+ .Segment = SegmentCommand.empty("__TEXT", .{
.vmaddr = 0x100000000, // always starts at 4GB
- .vmsize = 0,
- .fileoff = 0,
- .filesize = 0,
.maxprot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE,
.initprot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE,
- .nsects = 0,
- .flags = 0,
}),
});
}
@@ -2170,19 +2033,9 @@ fn populateMetadata(self: *Zld) !void {
.aarch64 => 2,
else => unreachable, // unhandled architecture type
};
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__text"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
+ try text_seg.addSection(self.allocator, "__text", .{
.@"align" = alignment,
- .reloff = 0,
- .nreloc = 0,
.flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
}
@@ -2199,19 +2052,10 @@ fn populateMetadata(self: *Zld) !void {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__stubs"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
- .size = 0,
- .offset = 0,
+ try text_seg.addSection(self.allocator, "__stubs", .{
.@"align" = alignment,
- .reloff = 0,
- .nreloc = 0,
.flags = macho.S_SYMBOL_STUBS | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
- .reserved1 = 0,
.reserved2 = stub_size,
- .reserved3 = 0,
});
}
@@ -2228,37 +2072,19 @@ fn populateMetadata(self: *Zld) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
- try text_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__stub_helper"),
- .segname = makeStaticString("__TEXT"),
- .addr = 0,
+ try text_seg.addSection(self.allocator, "__stub_helper", .{
.size = stub_helper_size,
- .offset = 0,
.@"align" = alignment,
- .reloff = 0,
- .nreloc = 0,
.flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
}
if (self.data_const_segment_cmd_index == null) {
self.data_const_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__DATA_CONST"),
- .vmaddr = 0,
- .vmsize = 0,
- .fileoff = 0,
- .filesize = 0,
+ .Segment = SegmentCommand.empty("__DATA_CONST", .{
.maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
.initprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
- .nsects = 0,
- .flags = 0,
}),
});
}
@@ -2266,37 +2092,18 @@ fn populateMetadata(self: *Zld) !void {
if (self.got_section_index == null) {
const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
self.got_section_index = @intCast(u16, data_const_seg.sections.items.len);
- try data_const_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__got"),
- .segname = makeStaticString("__DATA_CONST"),
- .addr = 0,
- .size = 0,
- .offset = 0,
+ try data_const_seg.addSection(self.allocator, "__got", .{
.@"align" = 3, // 2^3 = @sizeOf(u64)
- .reloff = 0,
- .nreloc = 0,
.flags = macho.S_NON_LAZY_SYMBOL_POINTERS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
}
if (self.data_segment_cmd_index == null) {
self.data_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__DATA"),
- .vmaddr = 0,
- .vmsize = 0,
- .fileoff = 0,
- .filesize = 0,
+ .Segment = SegmentCommand.empty("__DATA", .{
.maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
.initprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
- .nsects = 0,
- .flags = 0,
}),
});
}
@@ -2304,56 +2111,26 @@ fn populateMetadata(self: *Zld) !void {
if (self.la_symbol_ptr_section_index == null) {
const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
self.la_symbol_ptr_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__la_symbol_ptr"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
+ try data_seg.addSection(self.allocator, "__la_symbol_ptr", .{
.@"align" = 3, // 2^3 = @sizeOf(u64)
- .reloff = 0,
- .nreloc = 0,
.flags = macho.S_LAZY_SYMBOL_POINTERS,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
}
if (self.data_section_index == null) {
const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
self.data_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__data"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
+ try data_seg.addSection(self.allocator, "__data", .{
.@"align" = 3, // 2^3 = @sizeOf(u64)
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_REGULAR,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
});
}
if (self.linkedit_segment_cmd_index == null) {
self.linkedit_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
- .Segment = SegmentCommand.empty(.{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = @sizeOf(macho.segment_command_64),
- .segname = makeStaticString("__LINKEDIT"),
- .vmaddr = 0,
- .vmsize = 0,
- .fileoff = 0,
- .filesize = 0,
+ .Segment = SegmentCommand.empty("__LINKEDIT", .{
.maxprot = macho.VM_PROT_READ,
.initprot = macho.VM_PROT_READ,
- .nsects = 0,
- .flags = 0,
}),
});
}
@@ -2439,15 +2216,6 @@ fn populateMetadata(self: *Zld) !void {
try self.load_commands.append(self.allocator, .{ .Dylinker = dylinker_cmd });
}
- if (self.libsystem_cmd_index == null) {
- self.libsystem_cmd_index = @intCast(u16, self.load_commands.items.len);
-
- var dylib_cmd = try createLoadDylibCommand(self.allocator, mem.spanZ(LIB_SYSTEM_PATH), 2, 0, 0);
- errdefer dylib_cmd.deinit(self.allocator);
-
- try self.load_commands.append(self.allocator, .{ .Dylib = dylib_cmd });
- }
-
if (self.main_cmd_index == null) {
self.main_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
@@ -2481,24 +2249,28 @@ fn populateMetadata(self: *Zld) !void {
std.crypto.random.bytes(&uuid_cmd.uuid);
try self.load_commands.append(self.allocator, .{ .Uuid = uuid_cmd });
}
+}
- if (self.code_signature_cmd_index == null and self.arch.? == .aarch64) {
- self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
+fn addDataInCodeLC(self: *Zld) !void {
+ if (self.data_in_code_cmd_index == null) {
+ self.data_in_code_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
.LinkeditData = .{
- .cmd = macho.LC_CODE_SIGNATURE,
+ .cmd = macho.LC_DATA_IN_CODE,
.cmdsize = @sizeOf(macho.linkedit_data_command),
.dataoff = 0,
.datasize = 0,
},
});
}
+}
- if (self.data_in_code_cmd_index == null and self.arch.? == .x86_64) {
- self.data_in_code_cmd_index = @intCast(u16, self.load_commands.items.len);
+fn addCodeSignatureLC(self: *Zld) !void {
+ if (self.code_signature_cmd_index == null and self.arch.? == .aarch64) {
+ self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.allocator, .{
.LinkeditData = .{
- .cmd = macho.LC_DATA_IN_CODE,
+ .cmd = macho.LC_CODE_SIGNATURE,
.cmdsize = @sizeOf(macho.linkedit_data_command),
.dataoff = 0,
.datasize = 0,
@@ -2511,7 +2283,7 @@ fn addRpaths(self: *Zld, rpaths: []const []const u8) !void {
for (rpaths) |rpath| {
const cmdsize = @intCast(u32, mem.alignForwardGeneric(
u64,
- @sizeOf(macho.rpath_command) + rpath.len,
+ @sizeOf(macho.rpath_command) + rpath.len + 1,
@sizeOf(u64),
));
var rpath_cmd = emptyGenericCommandWithData(macho.rpath_command{
@@ -2594,9 +2366,7 @@ fn flush(self: *Zld) !void {
try self.writeBindInfoTable();
try self.writeLazyBindInfoTable();
try self.writeExportInfo();
- if (self.arch.? == .x86_64) {
- try self.writeDataInCode();
- }
+ try self.writeDataInCode();
{
const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
@@ -2654,7 +2424,6 @@ fn setEntryPoint(self: *Zld) !void {
// TODO we should respect the -entry flag passed in by the user to set a custom
// entrypoint. For now, assume default of `_main`.
const seg = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
- const text = seg.sections.items[self.text_section_index.?];
const sym = self.globals.get("_main") orelse return error.MissingMainEntrypoint;
const entry_sym = sym.cast(Symbol.Regular) orelse unreachable;
const ec = &self.load_commands.items[self.main_cmd_index.?].Main;
@@ -2749,14 +2518,24 @@ fn writeBindInfoTable(self: *Zld) !void {
for (self.got_entries.items) |sym| {
if (sym.cast(Symbol.Proxy)) |proxy| {
- const dylib_ordinal = ordinal: {
- const dylib = proxy.dylib orelse break :ordinal 1; // TODO embedded libSystem
- break :ordinal dylib.ordinal.?;
- };
try pointers.append(.{
.offset = base_offset + proxy.base.got_index.? * @sizeOf(u64),
.segment_id = segment_id,
- .dylib_ordinal = dylib_ordinal,
+ .dylib_ordinal = proxy.dylibOrdinal(),
+ .name = proxy.base.name,
+ });
+ }
+ }
+ }
+
+ for (self.imports.values()) |sym| {
+ if (sym.cast(Symbol.Proxy)) |proxy| {
+ for (proxy.bind_info.items) |info| {
+ const seg = self.load_commands.items[info.segment_id].Segment;
+ try pointers.append(.{
+ .offset = info.address - seg.inner.vmaddr,
+ .segment_id = info.segment_id,
+ .dylib_ordinal = proxy.dylibOrdinal(),
.name = proxy.base.name,
});
}
@@ -2771,15 +2550,11 @@ fn writeBindInfoTable(self: *Zld) !void {
const sym = self.imports.get("__tlv_bootstrap") orelse unreachable;
const proxy = sym.cast(Symbol.Proxy) orelse unreachable;
- const dylib_ordinal = ordinal: {
- const dylib = proxy.dylib orelse break :ordinal 1; // TODO embedded libSystem
- break :ordinal dylib.ordinal.?;
- };
try pointers.append(.{
.offset = base_offset,
.segment_id = segment_id,
- .dylib_ordinal = dylib_ordinal,
+ .dylib_ordinal = proxy.dylibOrdinal(),
.name = proxy.base.name,
});
}
@@ -2816,15 +2591,10 @@ fn writeLazyBindInfoTable(self: *Zld) !void {
for (self.stubs.items) |sym| {
const proxy = sym.cast(Symbol.Proxy) orelse unreachable;
- const dylib_ordinal = ordinal: {
- const dylib = proxy.dylib orelse break :ordinal 1; // TODO embedded libSystem
- break :ordinal dylib.ordinal.?;
- };
-
pointers.appendAssumeCapacity(.{
.offset = base_offset + sym.stubs_index.? * @sizeOf(u64),
.segment_id = segment_id,
- .dylib_ordinal = dylib_ordinal,
+ .dylib_ordinal = proxy.dylibOrdinal(),
.name = sym.name,
});
}
@@ -2862,7 +2632,6 @@ fn populateLazyBindOffsetsInStubHelper(self: *Zld, buffer: []const u8) !void {
error.EndOfStream => break,
else => return err,
};
- const imm: u8 = inst & macho.BIND_IMMEDIATE_MASK;
const opcode: u8 = inst & macho.BIND_OPCODE_MASK;
switch (opcode) {
@@ -2959,6 +2728,7 @@ fn writeDebugInfo(self: *Zld) !void {
for (self.objects.items) |object| {
const tu_path = object.tu_path orelse continue;
const tu_mtime = object.tu_mtime orelse continue;
+ _ = tu_mtime;
const dirname = std.fs.path.dirname(tu_path) orelse "./";
// Current dir
try stabs.append(.{
@@ -3131,15 +2901,12 @@ fn writeSymbolTable(self: *Zld) !void {
defer undefs.deinit();
for (self.imports.values()) |sym| {
- const ordinal = ordinal: {
- const dylib = sym.cast(Symbol.Proxy).?.dylib orelse break :ordinal 1; // TODO handle libSystem
- break :ordinal dylib.ordinal.?;
- };
+ const proxy = sym.cast(Symbol.Proxy) orelse unreachable;
try undefs.append(.{
.n_strx = try self.makeString(sym.name),
.n_type = macho.N_UNDF | macho.N_EXT,
.n_sect = 0,
- .n_desc = (ordinal * macho.N_SYMBOL_RESOLVER) | macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY,
+ .n_desc = (proxy.dylibOrdinal() * macho.N_SYMBOL_RESOLVER) | macho.REFERENCE_FLAG_UNDEFINED_NON_LAZY,
.n_value = 0,
});
}
@@ -3382,13 +3149,6 @@ fn writeHeader(self: *Zld) !void {
try self.file.?.pwriteAll(mem.asBytes(&header), 0);
}
-pub fn makeStaticString(bytes: []const u8) [16]u8 {
- var buf = [_]u8{0} ** 16;
- assert(bytes.len <= buf.len);
- mem.copy(u8, &buf, bytes);
- return buf;
-}
-
fn makeString(self: *Zld, bytes: []const u8) !u32 {
if (self.strtab_dir.get(bytes)) |offset| {
log.debug("reusing '{s}' from string table at offset 0x{x}", .{ bytes, offset });
diff --git a/src/link/MachO/bind.zig b/src/link/MachO/bind.zig
index d234fa8242..402e74d776 100644
--- a/src/link/MachO/bind.zig
+++ b/src/link/MachO/bind.zig
@@ -10,6 +10,7 @@ pub const Pointer = struct {
};
pub fn pointerCmp(context: void, a: Pointer, b: Pointer) bool {
+ _ = context;
if (a.segment_id < b.segment_id) return true;
if (a.segment_id == b.segment_id) {
return a.offset < b.offset;
diff --git a/src/link/MachO/commands.zig b/src/link/MachO/commands.zig
index 8bceb64f1e..6958b8d1e6 100644
--- a/src/link/MachO/commands.zig
+++ b/src/link/MachO/commands.zig
@@ -9,7 +9,6 @@ const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const MachO = @import("../MachO.zig");
-const makeStaticString = MachO.makeStaticString;
const padToIdeal = MachO.padToIdeal;
pub const LoadCommand = union(enum) {
@@ -187,11 +186,70 @@ pub const SegmentCommand = struct {
inner: macho.segment_command_64,
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
- pub fn empty(inner: macho.segment_command_64) SegmentCommand {
- return .{ .inner = inner };
+ const SegmentOptions = struct {
+ cmdsize: u32 = @sizeOf(macho.segment_command_64),
+ vmaddr: u64 = 0,
+ vmsize: u64 = 0,
+ fileoff: u64 = 0,
+ filesize: u64 = 0,
+ maxprot: macho.vm_prot_t = macho.VM_PROT_NONE,
+ initprot: macho.vm_prot_t = macho.VM_PROT_NONE,
+ nsects: u32 = 0,
+ flags: u32 = 0,
+ };
+
+ pub fn empty(comptime segname: []const u8, opts: SegmentOptions) SegmentCommand {
+ return .{
+ .inner = .{
+ .cmd = macho.LC_SEGMENT_64,
+ .cmdsize = opts.cmdsize,
+ .segname = makeStaticString(segname),
+ .vmaddr = opts.vmaddr,
+ .vmsize = opts.vmsize,
+ .fileoff = opts.fileoff,
+ .filesize = opts.filesize,
+ .maxprot = opts.maxprot,
+ .initprot = opts.initprot,
+ .nsects = opts.nsects,
+ .flags = opts.flags,
+ },
+ };
}
- pub fn addSection(self: *SegmentCommand, alloc: *Allocator, section: macho.section_64) !void {
+ const SectionOptions = struct {
+ addr: u64 = 0,
+ size: u64 = 0,
+ offset: u32 = 0,
+ @"align": u32 = 0,
+ reloff: u32 = 0,
+ nreloc: u32 = 0,
+ flags: u32 = macho.S_REGULAR,
+ reserved1: u32 = 0,
+ reserved2: u32 = 0,
+ reserved3: u32 = 0,
+ };
+
+ pub fn addSection(
+ self: *SegmentCommand,
+ alloc: *Allocator,
+ comptime sectname: []const u8,
+ opts: SectionOptions,
+ ) !void {
+ var section = macho.section_64{
+ .sectname = makeStaticString(sectname),
+ .segname = undefined,
+ .addr = opts.addr,
+ .size = opts.size,
+ .offset = opts.offset,
+ .@"align" = opts.@"align",
+ .reloff = opts.reloff,
+ .nreloc = opts.nreloc,
+ .flags = opts.flags,
+ .reserved1 = opts.reserved1,
+ .reserved2 = opts.reserved2,
+ .reserved3 = opts.reserved3,
+ };
+ mem.copy(u8, &section.segname, &self.inner.segname);
try self.sections.append(alloc, section);
self.inner.cmdsize += @sizeOf(macho.section_64);
self.inner.nsects += 1;
@@ -338,6 +396,13 @@ pub fn createLoadDylibCommand(
return dylib_cmd;
}
+fn makeStaticString(bytes: []const u8) [16]u8 {
+ var buf = [_]u8{0} ** 16;
+ assert(bytes.len <= buf.len);
+ mem.copy(u8, &buf, bytes);
+ return buf;
+}
+
fn testRead(allocator: *Allocator, buffer: []const u8, expected: anytype) !void {
var stream = io.fixedBufferStream(buffer);
var given = try LoadCommand.read(allocator, stream.reader());
diff --git a/src/link/MachO/reloc/x86_64.zig b/src/link/MachO/reloc/x86_64.zig
index 2a457fdea2..6df68b6b3e 100644
--- a/src/link/MachO/reloc/x86_64.zig
+++ b/src/link/MachO/reloc/x86_64.zig
@@ -175,7 +175,6 @@ pub const Parser = struct {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
- const is_extern = rel.r_extern == 1;
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 9d64245bbb..bfae799462 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -102,6 +102,7 @@ pub fn deinit(self: *SpirV) void {
}
pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
+ _ = module;
// Keep track of all decls so we can iterate over them on flush().
_ = try self.decl_table.getOrPut(self.base.allocator, decl);
}
@@ -111,7 +112,12 @@ pub fn updateDeclExports(
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
-) !void {}
+) !void {
+ _ = self;
+ _ = module;
+ _ = decl;
+ _ = exports;
+}
pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void {
assert(self.decl_table.swapRemove(decl));
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 3fcdc04e71..6d38939a88 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -216,7 +216,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
try module.failed_decls.put(module.gpa, decl, context.err_msg);
return;
},
- else => |e| return err,
+ else => |e| return e,
};
const code: []const u8 = switch (result) {
@@ -258,7 +258,12 @@ pub fn updateDeclExports(
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
-) !void {}
+) !void {
+ _ = self;
+ _ = module;
+ _ = decl;
+ _ = exports;
+}
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
if (self.getFuncidx(decl)) |func_idx| {
@@ -300,6 +305,7 @@ pub fn flush(self: *Wasm, comp: *Compilation) !void {
}
pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
+ _ = comp;
const tracy = trace(@src());
defer tracy.end();
@@ -496,7 +502,6 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
if (data_size != 0) {
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
- var len: u32 = 0;
// index to memory section (currently, there can only be 1 memory section in wasm)
try leb.writeULEB128(writer, @as(u32, 0));
@@ -558,7 +563,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
- const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+ const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
new file mode 100644
index 0000000000..35193b0eec
--- /dev/null
+++ b/src/link/tapi.zig
@@ -0,0 +1,86 @@
+const std = @import("std");
+const fs = std.fs;
+const mem = std.mem;
+const log = std.log.scoped(.tapi);
+
+const Allocator = mem.Allocator;
+const Yaml = @import("tapi/yaml.zig").Yaml;
+
+pub const LibStub = struct {
+ /// Underlying memory for stub's contents.
+ yaml: Yaml,
+
+ /// Typed contents of the tbd file.
+ inner: []Tbd,
+
+ const Tbd = struct {
+ tbd_version: u3,
+ targets: []const []const u8,
+ uuids: []const struct {
+ target: []const u8,
+ value: []const u8,
+ },
+ install_name: []const u8,
+ current_version: ?union(enum) {
+ string: []const u8,
+ float: f64,
+ int: u64,
+ },
+ compatibility_version: ?union(enum) {
+ string: []const u8,
+ float: f64,
+ int: u64,
+ },
+ reexported_libraries: ?[]const struct {
+ targets: []const []const u8,
+ libraries: []const []const u8,
+ },
+ parent_umbrella: ?[]const struct {
+ targets: []const []const u8,
+ umbrella: []const u8,
+ },
+ exports: ?[]const struct {
+ targets: []const []const u8,
+ symbols: ?[]const []const u8,
+ objc_classes: ?[]const []const u8,
+ },
+ reexports: ?[]const struct {
+ targets: []const []const u8,
+ symbols: ?[]const []const u8,
+ objc_classes: ?[]const []const u8,
+ },
+ allowable_clients: ?[]const struct {
+ targets: []const []const u8,
+ clients: []const []const u8,
+ },
+ objc_classes: ?[]const []const u8,
+ };
+
+ pub fn loadFromFile(allocator: *Allocator, file: fs.File) !LibStub {
+ const source = try file.readToEndAlloc(allocator, std.math.maxInt(u32));
+ defer allocator.free(source);
+
+ var lib_stub = LibStub{
+ .yaml = try Yaml.load(allocator, source),
+ .inner = undefined,
+ };
+
+ lib_stub.inner = lib_stub.yaml.parse([]Tbd) catch |err| blk: {
+ switch (err) {
+ error.TypeMismatch => {
+ // TODO clean this up.
+ var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
+ out[0] = try lib_stub.yaml.parse(Tbd);
+ break :blk out;
+ },
+ else => |e| return e,
+ }
+ };
+
+ return lib_stub;
+ }
+
+ pub fn deinit(self: *LibStub) void {
+ self.yaml.deinit();
+ }
+};
diff --git a/src/link/tapi/Tokenizer.zig b/src/link/tapi/Tokenizer.zig
new file mode 100644
index 0000000000..37fcedbfce
--- /dev/null
+++ b/src/link/tapi/Tokenizer.zig
@@ -0,0 +1,439 @@
+const Tokenizer = @This();
+
+const std = @import("std");
+const log = std.log.scoped(.tapi);
+const testing = std.testing;
+
+// The YAML source being tokenized.
+buffer: []const u8,
+// Current byte offset into `buffer`; advanced by next().
+index: usize = 0,
+
+/// A single lexeme: its kind plus the [start, end) byte range in `buffer`.
+pub const Token = struct {
+ id: Id,
+ start: usize,
+ end: usize,
+ // Count of spaces/tabs.
+ // Only active for .Space and .Tab tokens.
+ count: ?usize = null,
+
+ pub const Id = enum {
+ Eof,
+
+ NewLine,
+ DocStart, // ---
+ DocEnd, // ...
+ SeqItemInd, // -
+ MapValueInd, // :
+ FlowMapStart, // {
+ FlowMapEnd, // }
+ FlowSeqStart, // [
+ FlowSeqEnd, // ]
+
+ Comma,
+ Space,
+ Tab,
+ Comment, // #
+ Alias, // *
+ Anchor, // &
+ Tag, // !
+ SingleQuote, // '
+ DoubleQuote, // "
+
+ Literal,
+ };
+};
+
+pub const TokenIndex = usize;
+
+/// Cursor over a pre-tokenized buffer with random seek support, used by the
+/// parser to backtrack.
+pub const TokenIterator = struct {
+ buffer: []const Token,
+ pos: TokenIndex = 0,
+
+ /// Returns the current token and advances. Caller must not read past Eof
+ /// (no bounds check here).
+ pub fn next(self: *TokenIterator) Token {
+ const token = self.buffer[self.pos];
+ self.pos += 1;
+ return token;
+ }
+
+ /// Returns the current token without advancing, or null at end of buffer.
+ pub fn peek(self: TokenIterator) ?Token {
+ if (self.pos >= self.buffer.len) return null;
+ return self.buffer[self.pos];
+ }
+
+ pub fn reset(self: *TokenIterator) void {
+ self.pos = 0;
+ }
+
+ pub fn seekTo(self: *TokenIterator, pos: TokenIndex) void {
+ self.pos = pos;
+ }
+
+ /// Moves the cursor by a signed offset, clamping at 0 on underflow.
+ pub fn seekBy(self: *TokenIterator, offset: isize) void {
+ const new_pos = @bitCast(isize, self.pos) + offset;
+ if (new_pos < 0) {
+ self.pos = 0;
+ } else {
+ self.pos = @intCast(usize, new_pos);
+ }
+ }
+};
+
+/// Scans and returns the next token starting at `self.index`, advancing the
+/// index past it. `.Space` and `.Tab` tokens carry their run length in
+/// `count`. Returns `.Eof` when the buffer is exhausted.
+pub fn next(self: *Tokenizer) Token {
+ var result = Token{
+ .id = .Eof,
+ .start = self.index,
+ .end = undefined,
+ };
+
+ // Small hand-rolled state machine; payload carries the run length for
+ // repeated characters (spaces, tabs, hyphens, dots).
+ var state: union(enum) {
+ Start,
+ NewLine,
+ Space: usize,
+ Tab: usize,
+ Hyphen: usize,
+ Dot: usize,
+ Literal,
+ } = .Start;
+
+ while (self.index < self.buffer.len) : (self.index += 1) {
+ const c = self.buffer[self.index];
+ switch (state) {
+ .Start => switch (c) {
+ ' ' => {
+ state = .{ .Space = 1 };
+ },
+ '\t' => {
+ state = .{ .Tab = 1 };
+ },
+ '\n' => {
+ result.id = .NewLine;
+ self.index += 1;
+ break;
+ },
+ '\r' => {
+ state = .NewLine;
+ },
+ '-' => {
+ state = .{ .Hyphen = 1 };
+ },
+ '.' => {
+ state = .{ .Dot = 1 };
+ },
+ ',' => {
+ result.id = .Comma;
+ self.index += 1;
+ break;
+ },
+ '#' => {
+ result.id = .Comment;
+ self.index += 1;
+ break;
+ },
+ '*' => {
+ result.id = .Alias;
+ self.index += 1;
+ break;
+ },
+ '&' => {
+ result.id = .Anchor;
+ self.index += 1;
+ break;
+ },
+ '!' => {
+ result.id = .Tag;
+ self.index += 1;
+ break;
+ },
+ '\'' => {
+ result.id = .SingleQuote;
+ self.index += 1;
+ break;
+ },
+ '"' => {
+ result.id = .DoubleQuote;
+ self.index += 1;
+ break;
+ },
+ '[' => {
+ result.id = .FlowSeqStart;
+ self.index += 1;
+ break;
+ },
+ ']' => {
+ result.id = .FlowSeqEnd;
+ self.index += 1;
+ break;
+ },
+ ':' => {
+ result.id = .MapValueInd;
+ self.index += 1;
+ break;
+ },
+ '{' => {
+ result.id = .FlowMapStart;
+ self.index += 1;
+ break;
+ },
+ '}' => {
+ result.id = .FlowMapEnd;
+ self.index += 1;
+ break;
+ },
+ else => {
+ state = .Literal;
+ },
+ },
+ .Space => |*count| switch (c) {
+ ' ' => {
+ count.* += 1;
+ },
+ else => {
+ result.id = .Space;
+ result.count = count.*;
+ break;
+ },
+ },
+ .Tab => |*count| switch (c) {
+ // Fix: this arm previously matched ' ' (copy-paste from .Space),
+ // so a run of tabs was never counted past 1 and a tab followed
+ // by spaces reported a wrong count. Count tabs here.
+ '\t' => {
+ count.* += 1;
+ },
+ else => {
+ result.id = .Tab;
+ result.count = count.*;
+ break;
+ },
+ },
+ .NewLine => switch (c) {
+ '\n' => {
+ result.id = .NewLine;
+ self.index += 1;
+ break;
+ },
+ else => {}, // TODO this should be an error condition
+ },
+ .Hyphen => |*count| switch (c) {
+ ' ' => {
+ result.id = .SeqItemInd;
+ self.index += 1;
+ break;
+ },
+ '-' => {
+ count.* += 1;
+
+ if (count.* == 3) {
+ result.id = .DocStart;
+ self.index += 1;
+ break;
+ }
+ },
+ else => {
+ state = .Literal;
+ },
+ },
+ .Dot => |*count| switch (c) {
+ '.' => {
+ count.* += 1;
+
+ if (count.* == 3) {
+ result.id = .DocEnd;
+ self.index += 1;
+ break;
+ }
+ },
+ else => {
+ state = .Literal;
+ },
+ },
+ .Literal => switch (c) {
+ // Characters that terminate a literal run; the delimiter itself
+ // is left for the next call.
+ '\r', '\n', ' ', '\'', '"', ',', ':', ']', '}' => {
+ result.id = .Literal;
+ break;
+ },
+ else => {
+ result.id = .Literal;
+ },
+ },
+ }
+ }
+
+ // A literal running to end-of-buffer never hit a delimiter above.
+ if (state == .Literal and result.id == .Eof) {
+ result.id = .Literal;
+ }
+
+ result.end = self.index;
+
+ log.debug("{any}", .{result});
+ log.debug(" | {s}", .{self.buffer[result.start..result.end]});
+
+ return result;
+}
+
+/// Test helper: tokenizes `source` and asserts the token kinds match
+/// `expected`, in order. Extra tokens beyond `expected` are not checked.
+fn testExpected(source: []const u8, expected: []const Token.Id) !void {
+ var tokenizer = Tokenizer{
+ .buffer = source,
+ };
+
+ for (expected) |exp| {
+ const token = tokenizer.next();
+ try testing.expectEqual(exp, token.id);
+ }
+}
+
+// Tokenizer smoke tests: each case spells out the exact expected token stream
+// for a small YAML snippet, ending with .Eof.
+test "empty doc" {
+ try testExpected("", &[_]Token.Id{.Eof});
+}
+
+test "empty doc with explicit markers" {
+ try testExpected(
+ \\---
+ \\...
+ , &[_]Token.Id{
+ .DocStart, .NewLine, .DocEnd, .Eof,
+ });
+}
+
+test "sequence of values" {
+ try testExpected(
+ \\- 0
+ \\- 1
+ \\- 2
+ , &[_]Token.Id{
+ .SeqItemInd,
+ .Literal,
+ .NewLine,
+ .SeqItemInd,
+ .Literal,
+ .NewLine,
+ .SeqItemInd,
+ .Literal,
+ .Eof,
+ });
+}
+
+test "sequence of sequences" {
+ try testExpected(
+ \\- [ val1, val2]
+ \\- [val3, val4 ]
+ , &[_]Token.Id{
+ .SeqItemInd,
+ .FlowSeqStart,
+ .Space,
+ .Literal,
+ .Comma,
+ .Space,
+ .Literal,
+ .FlowSeqEnd,
+ .NewLine,
+ .SeqItemInd,
+ .FlowSeqStart,
+ .Literal,
+ .Comma,
+ .Space,
+ .Literal,
+ .Space,
+ .FlowSeqEnd,
+ .Eof,
+ });
+}
+
+test "mappings" {
+ try testExpected(
+ \\key1: value1
+ \\key2: value2
+ , &[_]Token.Id{
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .Literal,
+ .NewLine,
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .Literal,
+ .Eof,
+ });
+}
+
+test "inline mapped sequence of values" {
+ try testExpected(
+ \\key : [ val1,
+ \\ val2 ]
+ , &[_]Token.Id{
+ .Literal,
+ .Space,
+ .MapValueInd,
+ .Space,
+ .FlowSeqStart,
+ .Space,
+ .Literal,
+ .Comma,
+ .Space,
+ .NewLine,
+ .Space,
+ .Literal,
+ .Space,
+ .FlowSeqEnd,
+ .Eof,
+ });
+}
+
+test "part of tdb" {
+ try testExpected(
+ \\--- !tapi-tbd
+ \\tbd-version: 4
+ \\targets: [ x86_64-macos ]
+ \\
+ \\uuids:
+ \\ - target: x86_64-macos
+ \\ value: F86CC732-D5E4-30B5-AA7D-167DF5EC2708
+ \\
+ \\install-name: '/usr/lib/libSystem.B.dylib'
+ \\...
+ , &[_]Token.Id{
+ .DocStart,
+ .Space,
+ .Tag,
+ .Literal,
+ .NewLine,
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .Literal,
+ .NewLine,
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .FlowSeqStart,
+ .Space,
+ .Literal,
+ .Space,
+ .FlowSeqEnd,
+ .NewLine,
+ .NewLine,
+ .Literal,
+ .MapValueInd,
+ .NewLine,
+ .Space,
+ .SeqItemInd,
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .Literal,
+ .NewLine,
+ .Space,
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .Literal,
+ .NewLine,
+ .NewLine,
+ .Literal,
+ .MapValueInd,
+ .Space,
+ .SingleQuote,
+ .Literal,
+ .SingleQuote,
+ .NewLine,
+ .DocEnd,
+ .Eof,
+ });
+}
diff --git a/src/link/tapi/parse.zig b/src/link/tapi/parse.zig
new file mode 100644
index 0000000000..0c923f961b
--- /dev/null
+++ b/src/link/tapi/parse.zig
@@ -0,0 +1,708 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const log = std.log.scoped(.tapi);
+const mem = std.mem;
+const testing = std.testing;
+
+const Allocator = mem.Allocator;
+const Tokenizer = @import("Tokenizer.zig");
+const Token = Tokenizer.Token;
+const TokenIndex = Tokenizer.TokenIndex;
+const TokenIterator = Tokenizer.TokenIterator;
+
+/// Errors produced while building the parse tree, plus allocation failure.
+pub const ParseError = error{
+ MalformedYaml,
+ NestedDocuments,
+ UnexpectedTag,
+ UnexpectedEof,
+ UnexpectedToken,
+ Unhandled,
+} || Allocator.Error;
+
+/// Base of the parse-tree node hierarchy. Concrete nodes (Doc, Map, List,
+/// Value) embed this as their `base` field and are recovered from it via
+/// @fieldParentPtr, dispatching on `tag`.
+pub const Node = struct {
+ tag: Tag,
+ tree: *const Tree,
+
+ pub const Tag = enum {
+ doc,
+ map,
+ list,
+ value,
+ };
+
+ /// Downcasts to concrete node type T, or returns null if `tag` does not
+ /// match T.base_tag.
+ pub fn cast(self: *const Node, comptime T: type) ?*const T {
+ if (self.tag != T.base_tag) {
+ return null;
+ }
+ return @fieldParentPtr(T, "base", self);
+ }
+
+ /// Recursively frees children owned by the concrete node. The node's own
+ /// allocation is destroyed by its owner (parent node or Tree).
+ pub fn deinit(self: *Node, allocator: *Allocator) void {
+ switch (self.tag) {
+ .doc => @fieldParentPtr(Node.Doc, "base", self).deinit(allocator),
+ .map => @fieldParentPtr(Node.Map, "base", self).deinit(allocator),
+ .list => @fieldParentPtr(Node.List, "base", self).deinit(allocator),
+ .value => {},
+ }
+ }
+
+ /// Forwards formatting to the concrete node's format method.
+ pub fn format(
+ self: *const Node,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ return switch (self.tag) {
+ .doc => @fieldParentPtr(Node.Doc, "base", self).format(fmt, options, writer),
+ .map => @fieldParentPtr(Node.Map, "base", self).format(fmt, options, writer),
+ .list => @fieldParentPtr(Node.List, "base", self).format(fmt, options, writer),
+ .value => @fieldParentPtr(Node.Value, "base", self).format(fmt, options, writer),
+ };
+ }
+
+ /// A single YAML document; `directive` is the token after `--- !`.
+ pub const Doc = struct {
+ base: Node = Node{ .tag = Tag.doc, .tree = undefined },
+ start: ?TokenIndex = null,
+ end: ?TokenIndex = null,
+ directive: ?TokenIndex = null,
+ value: ?*Node = null,
+
+ pub const base_tag: Node.Tag = .doc;
+
+ pub fn deinit(self: *Doc, allocator: *Allocator) void {
+ if (self.value) |node| {
+ node.deinit(allocator);
+ allocator.destroy(node);
+ }
+ }
+
+ pub fn format(
+ self: *const Doc,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = fmt;
+ _ = options;
+ if (self.directive) |id| {
+ try std.fmt.format(writer, "{{ ", .{});
+ const directive = self.base.tree.tokens[id];
+ try std.fmt.format(writer, ".directive = {s}, ", .{
+ self.base.tree.source[directive.start..directive.end],
+ });
+ }
+ if (self.value) |node| {
+ try std.fmt.format(writer, "{}", .{node});
+ }
+ if (self.directive != null) {
+ try std.fmt.format(writer, " }}", .{});
+ }
+ }
+ };
+
+ /// A block or flow mapping: ordered key/value entries; keys are token
+ /// indices into the tree's token list.
+ pub const Map = struct {
+ base: Node = Node{ .tag = Tag.map, .tree = undefined },
+ start: ?TokenIndex = null,
+ end: ?TokenIndex = null,
+ values: std.ArrayListUnmanaged(Entry) = .{},
+
+ pub const base_tag: Node.Tag = .map;
+
+ pub const Entry = struct {
+ key: TokenIndex,
+ value: *Node,
+ };
+
+ pub fn deinit(self: *Map, allocator: *Allocator) void {
+ for (self.values.items) |entry| {
+ entry.value.deinit(allocator);
+ allocator.destroy(entry.value);
+ }
+ self.values.deinit(allocator);
+ }
+
+ pub fn format(
+ self: *const Map,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = fmt;
+ _ = options;
+ try std.fmt.format(writer, "{{ ", .{});
+ for (self.values.items) |entry| {
+ const key = self.base.tree.tokens[entry.key];
+ try std.fmt.format(writer, "{s} => {}, ", .{
+ self.base.tree.source[key.start..key.end],
+ entry.value,
+ });
+ }
+ return std.fmt.format(writer, " }}", .{});
+ }
+ };
+
+ /// A block or flow sequence of child nodes.
+ pub const List = struct {
+ base: Node = Node{ .tag = Tag.list, .tree = undefined },
+ start: ?TokenIndex = null,
+ end: ?TokenIndex = null,
+ values: std.ArrayListUnmanaged(*Node) = .{},
+
+ pub const base_tag: Node.Tag = .list;
+
+ pub fn deinit(self: *List, allocator: *Allocator) void {
+ for (self.values.items) |node| {
+ node.deinit(allocator);
+ allocator.destroy(node);
+ }
+ self.values.deinit(allocator);
+ }
+
+ pub fn format(
+ self: *const List,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = fmt;
+ _ = options;
+ try std.fmt.format(writer, "[ ", .{});
+ for (self.values.items) |node| {
+ try std.fmt.format(writer, "{}, ", .{node});
+ }
+ return std.fmt.format(writer, " ]", .{});
+ }
+ };
+
+ /// A scalar leaf; start/end are token indices spanning the raw source.
+ pub const Value = struct {
+ base: Node = Node{ .tag = Tag.value, .tree = undefined },
+ start: ?TokenIndex = null,
+ end: ?TokenIndex = null,
+
+ pub const base_tag: Node.Tag = .value;
+
+ pub fn format(
+ self: *const Value,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = fmt;
+ _ = options;
+ const start = self.base.tree.tokens[self.start.?];
+ const end = self.base.tree.tokens[self.end.?];
+ return std.fmt.format(writer, "{s}", .{
+ self.base.tree.source[start.start..end.end],
+ });
+ }
+ };
+};
+
+/// Owns the token list and the parsed documents. `source` and `tokens` are
+/// only valid after a successful parse().
+pub const Tree = struct {
+ allocator: *Allocator,
+ source: []const u8,
+ tokens: []Token,
+ docs: std.ArrayListUnmanaged(*Node) = .{},
+
+ pub fn init(allocator: *Allocator) Tree {
+ return .{
+ .allocator = allocator,
+ .source = undefined,
+ .tokens = undefined,
+ };
+ }
+
+ // NOTE(review): assumes parse() has run — `tokens` is undefined before
+ // then, so deinit on a freshly init'ed Tree is unsafe; confirm callers.
+ pub fn deinit(self: *Tree) void {
+ self.allocator.free(self.tokens);
+ for (self.docs.items) |doc| {
+ doc.deinit(self.allocator);
+ self.allocator.destroy(doc);
+ }
+ self.docs.deinit(self.allocator);
+ }
+
+ /// Tokenizes `source` (which must outlive the Tree) and parses every
+ /// document in it, appending the results to `docs`.
+ pub fn parse(self: *Tree, source: []const u8) !void {
+ var tokenizer = Tokenizer{ .buffer = source };
+ var tokens = std.ArrayList(Token).init(self.allocator);
+ errdefer tokens.deinit();
+
+ while (true) {
+ const token = tokenizer.next();
+ try tokens.append(token);
+ if (token.id == .Eof) break;
+ }
+
+ self.source = source;
+ self.tokens = tokens.toOwnedSlice();
+
+ var it = TokenIterator{ .buffer = self.tokens };
+ var parser = Parser{
+ .allocator = self.allocator,
+ .tree = self,
+ .token_it = &it,
+ };
+ defer parser.deinit();
+
+ // Base scope at indent 0; nested block scopes are pushed during parse.
+ try parser.scopes.append(self.allocator, .{
+ .indent = 0,
+ });
+
+ while (true) {
+ if (parser.token_it.peek() == null) return;
+ const pos = parser.token_it.pos;
+ const token = parser.token_it.next();
+
+ log.debug("Next token: {}, {}", .{ pos, token });
+
+ switch (token.id) {
+ // Skip inter-document trivia.
+ .Space, .Comment, .NewLine => {},
+ .Eof => break,
+ else => {
+ const doc = try parser.doc(pos);
+ try self.docs.append(self.allocator, &doc.base);
+ },
+ }
+ }
+ }
+};
+
+/// Recursive-descent parser over the token stream. Each production takes the
+/// TokenIndex where it starts, seeks the shared iterator there, and records
+/// start/end token indices on the node it allocates. Indentation-based block
+/// structure is tracked with a stack of scopes.
+const Parser = struct {
+ allocator: *Allocator,
+ tree: *Tree,
+ token_it: *TokenIterator,
+ scopes: std.ArrayListUnmanaged(Scope) = .{},
+
+ const Scope = struct {
+ indent: usize,
+ };
+
+ fn deinit(self: *Parser) void {
+ self.scopes.deinit(self.allocator);
+ }
+
+ /// Parses one document starting at `start`, handling an optional explicit
+ /// `--- !directive` header and `...` terminator.
+ fn doc(self: *Parser, start: TokenIndex) ParseError!*Node.Doc {
+ const node = try self.allocator.create(Node.Doc);
+ errdefer self.allocator.destroy(node);
+ node.* = .{
+ .start = start,
+ };
+ node.base.tree = self.tree;
+
+ self.token_it.seekTo(start);
+
+ log.debug("Doc start: {}, {}", .{ start, self.tree.tokens[start] });
+
+ const explicit_doc: bool = if (self.eatToken(.DocStart)) |_| explicit_doc: {
+ if (self.eatToken(.Tag)) |_| {
+ node.directive = try self.expectToken(.Literal);
+ }
+ _ = try self.expectToken(.NewLine);
+ break :explicit_doc true;
+ } else false;
+
+ while (true) {
+ const pos = self.token_it.pos;
+ const token = self.token_it.next();
+
+ log.debug("Next token: {}, {}", .{ pos, token });
+
+ switch (token.id) {
+ .Tag => {
+ return error.UnexpectedTag;
+ },
+ .Literal, .SingleQuote, .DoubleQuote => {
+ // A scalar followed by ':' means the doc body is a mapping.
+ _ = try self.expectToken(.MapValueInd);
+ const map_node = try self.map(pos);
+ node.value = &map_node.base;
+ },
+ .SeqItemInd => {
+ const list_node = try self.list(pos);
+ node.value = &list_node.base;
+ },
+ .FlowSeqStart => {
+ const list_node = try self.list_bracketed(pos);
+ node.value = &list_node.base;
+ },
+ .DocEnd => {
+ if (explicit_doc) break;
+ return error.UnexpectedToken;
+ },
+ .DocStart, .Eof => {
+ // Next document (or end): back up so the caller sees it.
+ self.token_it.seekBy(-1);
+ break;
+ },
+ else => {
+ return error.UnexpectedToken;
+ },
+ }
+ }
+
+ node.end = self.token_it.pos - 1;
+
+ log.debug("Doc end: {}, {}", .{ node.end.?, self.tree.tokens[node.end.?] });
+
+ return node;
+ }
+
+ /// Parses a block mapping starting at the key token at `start`.
+ fn map(self: *Parser, start: TokenIndex) ParseError!*Node.Map {
+ const node = try self.allocator.create(Node.Map);
+ errdefer self.allocator.destroy(node);
+ node.* = .{
+ .start = start,
+ };
+ node.base.tree = self.tree;
+
+ self.token_it.seekTo(start);
+
+ log.debug("Map start: {}, {}", .{ start, self.tree.tokens[start] });
+ log.debug("Current scope: {}", .{self.scopes.items[self.scopes.items.len - 1]});
+
+ while (true) {
+ // Parse key.
+ const key_pos = self.token_it.pos;
+ const key = self.token_it.next();
+ switch (key.id) {
+ .Literal => {},
+ else => {
+ // Not a key: this map is done; leave the token for the caller.
+ self.token_it.seekBy(-1);
+ break;
+ },
+ }
+
+ log.debug("Map key: {}, '{s}'", .{ key, self.tree.source[key.start..key.end] });
+
+ // Separator
+ _ = try self.expectToken(.MapValueInd);
+ self.eatCommentsAndSpace();
+
+ // Parse value.
+ const value: *Node = value: {
+ if (self.eatToken(.NewLine)) |_| {
+ // Explicit, complex value such as list or map.
+ try self.openScope();
+ const value_pos = self.token_it.pos;
+ const value = self.token_it.next();
+ switch (value.id) {
+ .Literal, .SingleQuote, .DoubleQuote => {
+ // Assume nested map.
+ const map_node = try self.map(value_pos);
+ break :value &map_node.base;
+ },
+ .SeqItemInd => {
+ // Assume list of values.
+ const list_node = try self.list(value_pos);
+ break :value &list_node.base;
+ },
+ else => {
+ log.err("{}", .{key});
+ return error.Unhandled;
+ },
+ }
+ } else {
+ const value_pos = self.token_it.pos;
+ const value = self.token_it.next();
+ switch (value.id) {
+ .Literal, .SingleQuote, .DoubleQuote => {
+ // Assume leaf value.
+ const leaf_node = try self.leaf_value(value_pos);
+ break :value &leaf_node.base;
+ },
+ .FlowSeqStart => {
+ const list_node = try self.list_bracketed(value_pos);
+ break :value &list_node.base;
+ },
+ else => {
+ log.err("{}", .{key});
+ return error.Unhandled;
+ },
+ }
+ }
+ };
+ log.debug("Map value: {}", .{value});
+
+ try node.values.append(self.allocator, .{
+ .key = key_pos,
+ .value = value,
+ });
+
+ if (self.eatToken(.NewLine)) |_| {
+ if (try self.closeScope()) {
+ break;
+ }
+ }
+ }
+
+ node.end = self.token_it.pos - 1;
+
+ log.debug("Map end: {}, {}", .{ node.end.?, self.tree.tokens[node.end.?] });
+
+ return node;
+ }
+
+ /// Parses a block sequence ("- item" per line) starting at `start`.
+ fn list(self: *Parser, start: TokenIndex) ParseError!*Node.List {
+ const node = try self.allocator.create(Node.List);
+ errdefer self.allocator.destroy(node);
+ node.* = .{
+ .start = start,
+ };
+ node.base.tree = self.tree;
+
+ self.token_it.seekTo(start);
+
+ log.debug("List start: {}, {}", .{ start, self.tree.tokens[start] });
+ log.debug("Current scope: {}", .{self.scopes.items[self.scopes.items.len - 1]});
+
+ while (true) {
+ _ = self.eatToken(.SeqItemInd) orelse {
+ _ = try self.closeScope();
+ break;
+ };
+ self.eatCommentsAndSpace();
+
+ const pos = self.token_it.pos;
+ const token = self.token_it.next();
+ const value: *Node = value: {
+ switch (token.id) {
+ .Literal, .SingleQuote, .DoubleQuote => {
+ if (self.eatToken(.MapValueInd)) |_| {
+ if (self.eatToken(.NewLine)) |_| {
+ try self.openScope();
+ }
+ // nested map
+ const map_node = try self.map(pos);
+ break :value &map_node.base;
+ } else {
+ // standalone (leaf) value
+ const leaf_node = try self.leaf_value(pos);
+ break :value &leaf_node.base;
+ }
+ },
+ .FlowSeqStart => {
+ const list_node = try self.list_bracketed(pos);
+ break :value &list_node.base;
+ },
+ else => {
+ log.err("{}", .{token});
+ return error.Unhandled;
+ },
+ }
+ };
+ try node.values.append(self.allocator, value);
+
+ _ = self.eatToken(.NewLine);
+ }
+
+ node.end = self.token_it.pos - 1;
+
+ log.debug("List end: {}, {}", .{ node.end.?, self.tree.tokens[node.end.?] });
+
+ return node;
+ }
+
+ /// Parses a flow sequence "[ a, b, ... ]" starting at `start`.
+ fn list_bracketed(self: *Parser, start: TokenIndex) ParseError!*Node.List {
+ const node = try self.allocator.create(Node.List);
+ errdefer self.allocator.destroy(node);
+ node.* = .{
+ .start = start,
+ };
+ node.base.tree = self.tree;
+
+ self.token_it.seekTo(start);
+
+ log.debug("List start: {}, {}", .{ start, self.tree.tokens[start] });
+ log.debug("Current scope: {}", .{self.scopes.items[self.scopes.items.len - 1]});
+
+ _ = try self.expectToken(.FlowSeqStart);
+
+ while (true) {
+ _ = self.eatToken(.NewLine);
+ self.eatCommentsAndSpace();
+
+ const pos = self.token_it.pos;
+ const token = self.token_it.next();
+
+ log.debug("Next token: {}, {}", .{ pos, token });
+
+ const value: *Node = value: {
+ switch (token.id) {
+ .FlowSeqStart => {
+ const list_node = try self.list_bracketed(pos);
+ break :value &list_node.base;
+ },
+ .FlowSeqEnd => {
+ break;
+ },
+ .Literal, .SingleQuote, .DoubleQuote => {
+ const leaf_node = try self.leaf_value(pos);
+ _ = self.eatToken(.Comma);
+ // TODO newline
+ break :value &leaf_node.base;
+ },
+ else => {
+ log.err("{}", .{token});
+ return error.Unhandled;
+ },
+ }
+ };
+ try node.values.append(self.allocator, value);
+ }
+
+ node.end = self.token_it.pos - 1;
+
+ log.debug("List end: {}, {}", .{ node.end.?, self.tree.tokens[node.end.?] });
+
+ return node;
+ }
+
+ /// Parses a scalar leaf starting at `start`. For quoted scalars the
+ /// start/end indices exclude the quote tokens; otherwise the leaf spans
+ /// literals (and interior spaces) up to the next delimiter.
+ fn leaf_value(self: *Parser, start: TokenIndex) ParseError!*Node.Value {
+ const node = try self.allocator.create(Node.Value);
+ errdefer self.allocator.destroy(node);
+ node.* = .{
+ .start = start,
+ };
+ node.base.tree = self.tree;
+
+ self.token_it.seekTo(start);
+
+ log.debug("Leaf start: {}, {}", .{ node.start.?, self.tree.tokens[node.start.?] });
+
+ parse: {
+ if (self.eatToken(.SingleQuote)) |_| {
+ node.start = node.start.? + 1;
+ while (true) {
+ const tok = self.token_it.next();
+ switch (tok.id) {
+ .SingleQuote => {
+ // pos - 2: step back over the closing quote and the
+ // post-next() advance to the last content token.
+ node.end = self.token_it.pos - 2;
+ break :parse;
+ },
+ .NewLine => return error.UnexpectedToken,
+ else => {},
+ }
+ }
+ }
+
+ if (self.eatToken(.DoubleQuote)) |_| {
+ node.start = node.start.? + 1;
+ while (true) {
+ const tok = self.token_it.next();
+ switch (tok.id) {
+ .DoubleQuote => {
+ node.end = self.token_it.pos - 2;
+ break :parse;
+ },
+ .NewLine => return error.UnexpectedToken,
+ else => {},
+ }
+ }
+ }
+
+ // TODO handle multiline strings in new block scope
+ while (true) {
+ const tok = self.token_it.next();
+ switch (tok.id) {
+ .Literal => {},
+ .Space => {
+ // NOTE(review): pos - 2 points at the literal before this
+ // space token — confirm against tokenizer output.
+ const trailing = self.token_it.pos - 2;
+ self.eatCommentsAndSpace();
+ if (self.token_it.peek()) |peek| {
+ if (peek.id != .Literal) {
+ node.end = trailing;
+ break;
+ }
+ }
+ },
+ else => {
+ self.token_it.seekBy(-1);
+ node.end = self.token_it.pos - 1;
+ break;
+ },
+ }
+ }
+ }
+
+ log.debug("Leaf end: {}, {}", .{ node.end.?, self.tree.tokens[node.end.?] });
+
+ return node;
+ }
+
+ /// Pushes a new indentation scope if the next token is leading whitespace.
+ /// Errors if the new indent is smaller than the enclosing scope's.
+ fn openScope(self: *Parser) !void {
+ const peek = self.token_it.peek() orelse return error.UnexpectedEof;
+ if (peek.id != .Space and peek.id != .Tab) {
+ // No need to open scope.
+ return;
+ }
+ const indent = self.token_it.next().count.?;
+ const prev_scope = self.scopes.items[self.scopes.items.len - 1];
+ if (indent < prev_scope.indent) {
+ return error.MalformedYaml;
+ }
+
+ log.debug("Opening scope...", .{});
+
+ try self.scopes.append(self.allocator, .{
+ .indent = indent,
+ });
+ }
+
+ /// Pops the current scope if the upcoming line is indented less than it.
+ /// Returns true when a scope was closed.
+ fn closeScope(self: *Parser) !bool {
+ const indent = indent: {
+ const peek = self.token_it.peek() orelse return error.UnexpectedEof;
+ switch (peek.id) {
+ .Space, .Tab => {
+ break :indent self.token_it.next().count.?;
+ },
+ else => {
+ break :indent 0;
+ },
+ }
+ };
+
+ const scope = self.scopes.items[self.scopes.items.len - 1];
+ if (indent < scope.indent) {
+ log.debug("Closing scope...", .{});
+ _ = self.scopes.pop();
+ return true;
+ }
+
+ return false;
+ }
+
+ /// Skips over any run of comment and space tokens.
+ fn eatCommentsAndSpace(self: *Parser) void {
+ while (true) {
+ _ = self.token_it.peek() orelse return;
+ const token = self.token_it.next();
+ switch (token.id) {
+ .Comment, .Space => {},
+ else => {
+ self.token_it.seekBy(-1);
+ break;
+ },
+ }
+ }
+ }
+
+ /// Consumes the next non-trivia token if it matches `id`, returning its
+ /// index; otherwise restores the cursor and returns null.
+ fn eatToken(self: *Parser, id: Token.Id) ?TokenIndex {
+ while (true) {
+ const pos = self.token_it.pos;
+ _ = self.token_it.peek() orelse return null;
+ const token = self.token_it.next();
+ switch (token.id) {
+ .Comment, .Space => continue,
+ else => |next_id| if (next_id == id) {
+ return pos;
+ } else {
+ self.token_it.seekTo(pos);
+ return null;
+ },
+ }
+ }
+ }
+
+ /// Like eatToken but fails with UnexpectedToken on mismatch.
+ fn expectToken(self: *Parser, id: Token.Id) ParseError!TokenIndex {
+ return self.eatToken(id) orelse error.UnexpectedToken;
+ }
+};
+
+// Pull in the parser tests so `zig test` on this file runs them too.
+test {
+ _ = @import("parse/test.zig");
+}
diff --git a/src/link/tapi/parse/test.zig b/src/link/tapi/parse/test.zig
new file mode 100644
index 0000000000..b96a71fe97
--- /dev/null
+++ b/src/link/tapi/parse/test.zig
@@ -0,0 +1,556 @@
+const std = @import("std");
+const mem = std.mem;
+const testing = std.testing;
+
+usingnamespace @import("../parse.zig");
+
+// Parser integration tests: each case parses a small YAML snippet and checks
+// node boundaries (token indices) and the exact source text of keys/values.
+test "explicit doc" {
+ const source =
+ \\--- !tapi-tbd
+ \\tbd-version: 4
+ \\abc-version: 5
+ \\...
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+
+ const directive = tree.tokens[doc.directive.?];
+ try testing.expectEqual(directive.id, .Literal);
+ try testing.expect(mem.eql(u8, "tapi-tbd", tree.source[directive.start..directive.end]));
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .map);
+
+ const map = doc.value.?.cast(Node.Map).?;
+ try testing.expectEqual(map.start.?, 5);
+ try testing.expectEqual(map.end.?, 14);
+ try testing.expectEqual(map.values.items.len, 2);
+
+ {
+ const entry = map.values.items[0];
+
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "tbd-version", tree.source[key.start..key.end]));
+
+ const value = entry.value.cast(Node.Value).?;
+ const value_tok = tree.tokens[value.start.?];
+ try testing.expectEqual(value_tok.id, .Literal);
+ try testing.expect(mem.eql(u8, "4", tree.source[value_tok.start..value_tok.end]));
+ }
+
+ {
+ const entry = map.values.items[1];
+
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "abc-version", tree.source[key.start..key.end]));
+
+ const value = entry.value.cast(Node.Value).?;
+ const value_tok = tree.tokens[value.start.?];
+ try testing.expectEqual(value_tok.id, .Literal);
+ try testing.expect(mem.eql(u8, "5", tree.source[value_tok.start..value_tok.end]));
+ }
+}
+
+test "leaf in quotes" {
+ const source =
+ \\key1: no quotes
+ \\key2: 'single quoted'
+ \\key3: "double quoted"
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+ try testing.expect(doc.directive == null);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .map);
+
+ const map = doc.value.?.cast(Node.Map).?;
+ try testing.expectEqual(map.start.?, 0);
+ try testing.expectEqual(map.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(map.values.items.len, 3);
+
+ {
+ const entry = map.values.items[0];
+
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "key1",
+ tree.source[key.start..key.end],
+ ));
+
+ const value = entry.value.cast(Node.Value).?;
+ const start = tree.tokens[value.start.?];
+ const end = tree.tokens[value.end.?];
+ try testing.expectEqual(start.id, .Literal);
+ try testing.expectEqual(end.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "no quotes",
+ tree.source[start.start..end.end],
+ ));
+ }
+}
+
+test "nested maps" {
+ const source =
+ \\key1:
+ \\ key1_1 : value1_1
+ \\ key1_2 : value1_2
+ \\key2 : value2
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+ try testing.expect(doc.directive == null);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .map);
+
+ const map = doc.value.?.cast(Node.Map).?;
+ try testing.expectEqual(map.start.?, 0);
+ try testing.expectEqual(map.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(map.values.items.len, 2);
+
+ {
+ const entry = map.values.items[0];
+
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key1", tree.source[key.start..key.end]));
+
+ const nested_map = entry.value.cast(Node.Map).?;
+ try testing.expectEqual(nested_map.start.?, 4);
+ try testing.expectEqual(nested_map.end.?, 16);
+ try testing.expectEqual(nested_map.values.items.len, 2);
+
+ {
+ const nested_entry = nested_map.values.items[0];
+
+ const nested_key = tree.tokens[nested_entry.key];
+ try testing.expectEqual(nested_key.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "key1_1",
+ tree.source[nested_key.start..nested_key.end],
+ ));
+
+ const nested_value = nested_entry.value.cast(Node.Value).?;
+ const nested_value_tok = tree.tokens[nested_value.start.?];
+ try testing.expectEqual(nested_value_tok.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "value1_1",
+ tree.source[nested_value_tok.start..nested_value_tok.end],
+ ));
+ }
+
+ {
+ const nested_entry = nested_map.values.items[1];
+
+ const nested_key = tree.tokens[nested_entry.key];
+ try testing.expectEqual(nested_key.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "key1_2",
+ tree.source[nested_key.start..nested_key.end],
+ ));
+
+ const nested_value = nested_entry.value.cast(Node.Value).?;
+ const nested_value_tok = tree.tokens[nested_value.start.?];
+ try testing.expectEqual(nested_value_tok.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "value1_2",
+ tree.source[nested_value_tok.start..nested_value_tok.end],
+ ));
+ }
+ }
+
+ {
+ const entry = map.values.items[1];
+
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key2", tree.source[key.start..key.end]));
+
+ const value = entry.value.cast(Node.Value).?;
+ const value_tok = tree.tokens[value.start.?];
+ try testing.expectEqual(value_tok.id, .Literal);
+ try testing.expect(mem.eql(
+ u8,
+ "value2",
+ tree.source[value_tok.start..value_tok.end],
+ ));
+ }
+}
+
+test "map of list of values" {
+ const source =
+ \\ints:
+ \\ - 0
+ \\ - 1
+ \\ - 2
+ ;
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .map);
+
+ const map = doc.value.?.cast(Node.Map).?;
+ try testing.expectEqual(map.start.?, 0);
+ try testing.expectEqual(map.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(map.values.items.len, 1);
+
+ const entry = map.values.items[0];
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "ints", tree.source[key.start..key.end]));
+
+ const value = entry.value.cast(Node.List).?;
+ try testing.expectEqual(value.start.?, 4);
+ try testing.expectEqual(value.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(value.values.items.len, 3);
+
+ {
+ const elem = value.values.items[0].cast(Node.Value).?;
+ const leaf = tree.tokens[elem.start.?];
+ try testing.expectEqual(leaf.id, .Literal);
+ try testing.expect(mem.eql(u8, "0", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ const elem = value.values.items[1].cast(Node.Value).?;
+ const leaf = tree.tokens[elem.start.?];
+ try testing.expectEqual(leaf.id, .Literal);
+ try testing.expect(mem.eql(u8, "1", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ const elem = value.values.items[2].cast(Node.Value).?;
+ const leaf = tree.tokens[elem.start.?];
+ try testing.expectEqual(leaf.id, .Literal);
+ try testing.expect(mem.eql(u8, "2", tree.source[leaf.start..leaf.end]));
+ }
+}
+
+test "map of list of maps" {
+ const source =
+ \\key1:
+ \\- key2 : value2
+ \\- key3 : value3
+ \\- key4 : value4
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .map);
+
+ const map = doc.value.?.cast(Node.Map).?;
+ try testing.expectEqual(map.start.?, 0);
+ try testing.expectEqual(map.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(map.values.items.len, 1);
+
+ const entry = map.values.items[0];
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key1", tree.source[key.start..key.end]));
+
+ const value = entry.value.cast(Node.List).?;
+ try testing.expectEqual(value.start.?, 3);
+ try testing.expectEqual(value.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(value.values.items.len, 3);
+
+ {
+ const elem = value.values.items[0].cast(Node.Map).?;
+ const nested = elem.values.items[0];
+ const nested_key = tree.tokens[nested.key];
+ try testing.expectEqual(nested_key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key2", tree.source[nested_key.start..nested_key.end]));
+
+ const nested_v = nested.value.cast(Node.Value).?;
+ const leaf = tree.tokens[nested_v.start.?];
+ try testing.expectEqual(leaf.id, .Literal);
+ try testing.expect(mem.eql(u8, "value2", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ const elem = value.values.items[1].cast(Node.Map).?;
+ const nested = elem.values.items[0];
+ const nested_key = tree.tokens[nested.key];
+ try testing.expectEqual(nested_key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key3", tree.source[nested_key.start..nested_key.end]));
+
+ const nested_v = nested.value.cast(Node.Value).?;
+ const leaf = tree.tokens[nested_v.start.?];
+ try testing.expectEqual(leaf.id, .Literal);
+ try testing.expect(mem.eql(u8, "value3", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ const elem = value.values.items[2].cast(Node.Map).?;
+ const nested = elem.values.items[0];
+ const nested_key = tree.tokens[nested.key];
+ try testing.expectEqual(nested_key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key4", tree.source[nested_key.start..nested_key.end]));
+
+ const nested_v = nested.value.cast(Node.Value).?;
+ const leaf = tree.tokens[nested_v.start.?];
+ try testing.expectEqual(leaf.id, .Literal);
+ try testing.expect(mem.eql(u8, "value4", tree.source[leaf.start..leaf.end]));
+ }
+}
+
+test "list of lists" {
+ const source =
+ \\- [name , hr, avg ]
+ \\- [Mark McGwire , 65, 0.278]
+ \\- [Sammy Sosa , 63, 0.288]
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .list);
+
+ const list = doc.value.?.cast(Node.List).?;
+ try testing.expectEqual(list.start.?, 0);
+ try testing.expectEqual(list.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(list.values.items.len, 3);
+
+ {
+ try testing.expectEqual(list.values.items[0].tag, .list);
+ const nested = list.values.items[0].cast(Node.List).?;
+ try testing.expectEqual(nested.values.items.len, 3);
+
+ {
+ try testing.expectEqual(nested.values.items[0].tag, .value);
+ const value = nested.values.items[0].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "name", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(nested.values.items[1].tag, .value);
+ const value = nested.values.items[1].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "hr", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(nested.values.items[2].tag, .value);
+ const value = nested.values.items[2].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "avg", tree.source[leaf.start..leaf.end]));
+ }
+ }
+
+ {
+ try testing.expectEqual(list.values.items[1].tag, .list);
+ const nested = list.values.items[1].cast(Node.List).?;
+ try testing.expectEqual(nested.values.items.len, 3);
+
+ {
+ try testing.expectEqual(nested.values.items[0].tag, .value);
+ const value = nested.values.items[0].cast(Node.Value).?;
+ const start = tree.tokens[value.start.?];
+ const end = tree.tokens[value.end.?];
+ try testing.expect(mem.eql(u8, "Mark McGwire", tree.source[start.start..end.end]));
+ }
+
+ {
+ try testing.expectEqual(nested.values.items[1].tag, .value);
+ const value = nested.values.items[1].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "65", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(nested.values.items[2].tag, .value);
+ const value = nested.values.items[2].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "0.278", tree.source[leaf.start..leaf.end]));
+ }
+ }
+
+ {
+ try testing.expectEqual(list.values.items[2].tag, .list);
+ const nested = list.values.items[2].cast(Node.List).?;
+ try testing.expectEqual(nested.values.items.len, 3);
+
+ {
+ try testing.expectEqual(nested.values.items[0].tag, .value);
+ const value = nested.values.items[0].cast(Node.Value).?;
+ const start = tree.tokens[value.start.?];
+ const end = tree.tokens[value.end.?];
+ try testing.expect(mem.eql(u8, "Sammy Sosa", tree.source[start.start..end.end]));
+ }
+
+ {
+ try testing.expectEqual(nested.values.items[1].tag, .value);
+ const value = nested.values.items[1].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "63", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(nested.values.items[2].tag, .value);
+ const value = nested.values.items[2].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "0.288", tree.source[leaf.start..leaf.end]));
+ }
+ }
+}
+
+test "inline list" {
+ const source =
+ \\[name , hr, avg ]
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .list);
+
+ const list = doc.value.?.cast(Node.List).?;
+ try testing.expectEqual(list.start.?, 0);
+ try testing.expectEqual(list.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(list.values.items.len, 3);
+
+ {
+ try testing.expectEqual(list.values.items[0].tag, .value);
+ const value = list.values.items[0].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "name", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(list.values.items[1].tag, .value);
+ const value = list.values.items[1].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "hr", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(list.values.items[2].tag, .value);
+ const value = list.values.items[2].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "avg", tree.source[leaf.start..leaf.end]));
+ }
+}
+
+test "inline list as mapping value" {
+ const source =
+ \\key : [
+ \\ name ,
+ \\ hr, avg ]
+ ;
+
+ var tree = Tree.init(testing.allocator);
+ defer tree.deinit();
+ try tree.parse(source);
+
+ try testing.expectEqual(tree.docs.items.len, 1);
+
+ const doc = tree.docs.items[0].cast(Node.Doc).?;
+ try testing.expectEqual(doc.start.?, 0);
+ try testing.expectEqual(doc.end.?, tree.tokens.len - 2);
+
+ try testing.expect(doc.value != null);
+ try testing.expectEqual(doc.value.?.tag, .map);
+
+ const map = doc.value.?.cast(Node.Map).?;
+ try testing.expectEqual(map.start.?, 0);
+ try testing.expectEqual(map.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(map.values.items.len, 1);
+
+ const entry = map.values.items[0];
+ const key = tree.tokens[entry.key];
+ try testing.expectEqual(key.id, .Literal);
+ try testing.expect(mem.eql(u8, "key", tree.source[key.start..key.end]));
+
+ const list = entry.value.cast(Node.List).?;
+ try testing.expectEqual(list.start.?, 4);
+ try testing.expectEqual(list.end.?, tree.tokens.len - 2);
+ try testing.expectEqual(list.values.items.len, 3);
+
+ {
+ try testing.expectEqual(list.values.items[0].tag, .value);
+ const value = list.values.items[0].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "name", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(list.values.items[1].tag, .value);
+ const value = list.values.items[1].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "hr", tree.source[leaf.start..leaf.end]));
+ }
+
+ {
+ try testing.expectEqual(list.values.items[2].tag, .value);
+ const value = list.values.items[2].cast(Node.Value).?;
+ const leaf = tree.tokens[value.start.?];
+ try testing.expect(mem.eql(u8, "avg", tree.source[leaf.start..leaf.end]));
+ }
+}
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
new file mode 100644
index 0000000000..b58df7609f
--- /dev/null
+++ b/src/link/tapi/yaml.zig
@@ -0,0 +1,651 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const math = std.math;
+const mem = std.mem;
+const testing = std.testing;
+const log = std.log.scoped(.tapi);
+
+const Allocator = mem.Allocator;
+const ArenaAllocator = std.heap.ArenaAllocator;
+
+pub const Tokenizer = @import("Tokenizer.zig");
+pub const parse = @import("parse.zig");
+
+const Node = parse.Node;
+const Tree = parse.Tree;
+const ParseError = parse.ParseError;
+
+pub const YamlError = error{
+ UnexpectedNodeType,
+ OutOfMemory,
+} || ParseError || std.fmt.ParseIntError;
+
+pub const ValueType = enum {
+ empty,
+ int,
+ float,
+ string,
+ list,
+ map,
+};
+
+pub const List = []Value;
+pub const Map = std.StringArrayHashMap(Value);
+
+pub const Value = union(ValueType) {
+ empty,
+ int: i64,
+ float: f64,
+ string: []const u8,
+ list: List,
+ map: Map,
+
+ pub fn asInt(self: Value) !i64 {
+ if (self != .int) return error.TypeMismatch;
+ return self.int;
+ }
+
+ pub fn asFloat(self: Value) !f64 {
+ if (self != .float) return error.TypeMismatch;
+ return self.float;
+ }
+
+ pub fn asString(self: Value) ![]const u8 {
+ if (self != .string) return error.TypeMismatch;
+ return self.string;
+ }
+
+ pub fn asList(self: Value) !List {
+ if (self != .list) return error.TypeMismatch;
+ return self.list;
+ }
+
+ pub fn asMap(self: Value) !Map {
+ if (self != .map) return error.TypeMismatch;
+ return self.map;
+ }
+
+ const StringifyArgs = struct {
+ indentation: usize = 0,
+ should_inline_first_key: bool = false,
+ };
+
+ pub const StringifyError = std.os.WriteError;
+
+ pub fn stringify(self: Value, writer: anytype, args: StringifyArgs) StringifyError!void {
+ switch (self) {
+ .empty => return,
+ .int => |int| return writer.print("{}", .{int}),
+ .float => |float| return writer.print("{d}", .{float}),
+ .string => |string| return writer.print("{s}", .{string}),
+ .list => |list| {
+ const len = list.len;
+ if (len == 0) return;
+
+ const first = list[0];
+ if (first.is_compound()) {
+ for (list) |elem, i| {
+ try writer.writeByteNTimes(' ', args.indentation);
+ try writer.writeAll("- ");
+ try elem.stringify(writer, .{
+ .indentation = args.indentation + 2,
+ .should_inline_first_key = true,
+ });
+ if (i < len - 1) {
+ try writer.writeByte('\n');
+ }
+ }
+ return;
+ }
+
+ try writer.writeAll("[ ");
+ for (list) |elem, i| {
+ try elem.stringify(writer, args);
+ if (i < len - 1) {
+ try writer.writeAll(", ");
+ }
+ }
+ try writer.writeAll(" ]");
+ },
+ .map => |map| {
+ const keys = map.keys();
+ const len = keys.len;
+ if (len == 0) return;
+
+ for (keys) |key, i| {
+ if (!args.should_inline_first_key or i != 0) {
+ try writer.writeByteNTimes(' ', args.indentation);
+ }
+ try writer.print("{s}: ", .{key});
+
+ const value = map.get(key) orelse unreachable;
+ const should_inline = blk: {
+ if (!value.is_compound()) break :blk true;
+ if (value == .list and value.list.len > 0 and !value.list[0].is_compound()) break :blk true;
+ break :blk false;
+ };
+
+ if (should_inline) {
+ try value.stringify(writer, args);
+ } else {
+ try writer.writeByte('\n');
+ try value.stringify(writer, .{
+ .indentation = args.indentation + 4,
+ });
+ }
+
+ if (i < len - 1) {
+ try writer.writeByte('\n');
+ }
+ }
+ },
+ }
+ }
+
+ fn is_compound(self: Value) bool {
+ return switch (self) {
+ .list, .map => true,
+ else => false,
+ };
+ }
+
+ fn fromNode(arena: *Allocator, tree: *const Tree, node: *const Node, type_hint: ?ValueType) YamlError!Value {
+ if (node.cast(Node.Doc)) |doc| {
+ const inner = doc.value orelse {
+ // empty doc
+ return Value{ .empty = .{} };
+ };
+ return Value.fromNode(arena, tree, inner, null);
+ } else if (node.cast(Node.Map)) |map| {
+ var out_map = std.StringArrayHashMap(Value).init(arena);
+ try out_map.ensureUnusedCapacity(map.values.items.len);
+
+ for (map.values.items) |entry| {
+ const key_tok = tree.tokens[entry.key];
+ const key = try arena.dupe(u8, tree.source[key_tok.start..key_tok.end]);
+ const value = try Value.fromNode(arena, tree, entry.value, null);
+
+ out_map.putAssumeCapacityNoClobber(key, value);
+ }
+
+ return Value{ .map = out_map };
+ } else if (node.cast(Node.List)) |list| {
+ var out_list = std.ArrayList(Value).init(arena);
+ try out_list.ensureUnusedCapacity(list.values.items.len);
+
+ if (list.values.items.len > 0) {
+ const hint = if (list.values.items[0].cast(Node.Value)) |value| hint: {
+ const start = tree.tokens[value.start.?];
+ const end = tree.tokens[value.end.?];
+ const raw = tree.source[start.start..end.end];
+ _ = std.fmt.parseInt(i64, raw, 10) catch {
+ _ = std.fmt.parseFloat(f64, raw) catch {
+ break :hint ValueType.string;
+ };
+ break :hint ValueType.float;
+ };
+ break :hint ValueType.int;
+ } else null;
+
+ for (list.values.items) |elem| {
+ const value = try Value.fromNode(arena, tree, elem, hint);
+ out_list.appendAssumeCapacity(value);
+ }
+ }
+
+ return Value{ .list = out_list.toOwnedSlice() };
+ } else if (node.cast(Node.Value)) |value| {
+ const start = tree.tokens[value.start.?];
+ const end = tree.tokens[value.end.?];
+ const raw = tree.source[start.start..end.end];
+
+ if (type_hint) |hint| {
+ return switch (hint) {
+ .int => Value{ .int = try std.fmt.parseInt(i64, raw, 10) },
+ .float => Value{ .float = try std.fmt.parseFloat(f64, raw) },
+ .string => Value{ .string = try arena.dupe(u8, raw) },
+ else => unreachable,
+ };
+ }
+
+ try_int: {
+ // TODO infer base for int
+ const int = std.fmt.parseInt(i64, raw, 10) catch break :try_int;
+ return Value{ .int = int };
+ }
+ try_float: {
+ const float = std.fmt.parseFloat(f64, raw) catch break :try_float;
+ return Value{ .float = float };
+ }
+ return Value{ .string = try arena.dupe(u8, raw) };
+ } else {
+ log.err("Unexpected node type: {}", .{node.tag});
+ return error.UnexpectedNodeType;
+ }
+ }
+};
+
+pub const Yaml = struct {
+ arena: ArenaAllocator,
+ tree: ?Tree = null,
+ docs: std.ArrayList(Value),
+
+ pub fn deinit(self: *Yaml) void {
+ self.arena.deinit();
+ }
+
+ pub fn stringify(self: Yaml, writer: anytype) !void {
+ for (self.docs.items) |doc| {
+ // if (doc.directive) |directive| {
+ // try writer.print("--- !{s}\n", .{directive});
+ // }
+ try doc.stringify(writer, .{});
+ // if (doc.directive != null) {
+ // try writer.writeAll("...\n");
+ // }
+ }
+ }
+
+ pub fn load(allocator: *Allocator, source: []const u8) !Yaml {
+ var arena = ArenaAllocator.init(allocator);
+
+ var tree = Tree.init(&arena.allocator);
+ try tree.parse(source);
+
+ var docs = std.ArrayList(Value).init(&arena.allocator);
+ try docs.ensureUnusedCapacity(tree.docs.items.len);
+
+ for (tree.docs.items) |node| {
+ const value = try Value.fromNode(&arena.allocator, &tree, node, null);
+ docs.appendAssumeCapacity(value);
+ }
+
+ return Yaml{
+ .arena = arena,
+ .tree = tree,
+ .docs = docs,
+ };
+ }
+
+ pub const Error = error{
+ Unimplemented,
+ TypeMismatch,
+ StructFieldMissing,
+ ArraySizeMismatch,
+ UntaggedUnion,
+ UnionTagMissing,
+ Overflow,
+ OutOfMemory,
+ };
+
+ pub fn parse(self: *Yaml, comptime T: type) Error!T {
+ if (self.docs.items.len == 0) {
+ if (@typeInfo(T) == .Void) return {};
+ return error.TypeMismatch;
+ }
+
+ if (self.docs.items.len == 1) {
+ return self.parseValue(T, self.docs.items[0]);
+ }
+
+ switch (@typeInfo(T)) {
+ .Array => |info| {
+ var parsed: T = undefined;
+ for (self.docs.items) |doc, i| {
+ parsed[i] = try self.parseValue(info.child, doc);
+ }
+ return parsed;
+ },
+ .Pointer => |info| {
+ switch (info.size) {
+ .Slice => {
+ var parsed = try self.arena.allocator.alloc(info.child, self.docs.items.len);
+ for (self.docs.items) |doc, i| {
+ parsed[i] = try self.parseValue(info.child, doc);
+ }
+ return parsed;
+ },
+ else => return error.TypeMismatch,
+ }
+ },
+ .Union => return error.Unimplemented,
+ else => return error.TypeMismatch,
+ }
+ }
+
+ fn parseValue(self: *Yaml, comptime T: type, value: Value) Error!T {
+ return switch (@typeInfo(T)) {
+ .Int => math.cast(T, try value.asInt()),
+ .Float => math.lossyCast(T, try value.asFloat()),
+ .Struct => self.parseStruct(T, try value.asMap()),
+ .Union => self.parseUnion(T, value),
+ .Array => self.parseArray(T, try value.asList()),
+ .Pointer => {
+ if (value.asList()) |list| {
+ return self.parsePointer(T, .{ .list = list });
+ } else |_| {
+ return self.parsePointer(T, .{ .string = try value.asString() });
+ }
+ },
+ .Void => error.TypeMismatch,
+ .Optional => unreachable,
+ else => error.Unimplemented,
+ };
+ }
+
+ fn parseUnion(self: *Yaml, comptime T: type, value: Value) Error!T {
+ const union_info = @typeInfo(T).Union;
+
+ if (union_info.tag_type) |_| {
+ inline for (union_info.fields) |field| {
+ if (self.parseValue(field.field_type, value)) |u_value| {
+ return @unionInit(T, field.name, u_value);
+ } else |err| {
+ if (@as(@TypeOf(err) || error{TypeMismatch}, err) != error.TypeMismatch) return err;
+ }
+ }
+ } else return error.UntaggedUnion;
+
+ return error.UnionTagMissing;
+ }
+
+ fn parseOptional(self: *Yaml, comptime T: type, value: ?Value) Error!T {
+ const unwrapped = value orelse return null;
+ const opt_info = @typeInfo(T).Optional;
+ return @as(T, try self.parseValue(opt_info.child, unwrapped));
+ }
+
+ fn parseStruct(self: *Yaml, comptime T: type, map: Map) Error!T {
+ const struct_info = @typeInfo(T).Struct;
+ var parsed: T = undefined;
+
+ inline for (struct_info.fields) |field| {
+ const value: ?Value = map.get(field.name) orelse blk: {
+ const field_name = try mem.replaceOwned(u8, &self.arena.allocator, field.name, "_", "-");
+ break :blk map.get(field_name);
+ };
+
+ if (@typeInfo(field.field_type) == .Optional) {
+ @field(parsed, field.name) = try self.parseOptional(field.field_type, value);
+ continue;
+ }
+
+ const unwrapped = value orelse {
+ log.err("missing struct field: {s}: {s}", .{ field.name, @typeName(field.field_type) });
+ return error.StructFieldMissing;
+ };
+ @field(parsed, field.name) = try self.parseValue(field.field_type, unwrapped);
+ }
+
+ return parsed;
+ }
+
+ fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T {
+ const ptr_info = @typeInfo(T).Pointer;
+ const arena = &self.arena.allocator;
+
+ switch (ptr_info.size) {
+ .Slice => {
+ const child_info = @typeInfo(ptr_info.child);
+ if (child_info == .Int and child_info.Int.bits == 8) {
+ return value.asString();
+ }
+
+ var parsed = try arena.alloc(ptr_info.child, value.list.len);
+ for (value.list) |elem, i| {
+ parsed[i] = try self.parseValue(ptr_info.child, elem);
+ }
+ return parsed;
+ },
+ else => return error.Unimplemented,
+ }
+ }
+
+ fn parseArray(self: *Yaml, comptime T: type, list: List) Error!T {
+ const array_info = @typeInfo(T).Array;
+ if (array_info.len != list.len) return error.ArraySizeMismatch;
+
+ var parsed: T = undefined;
+ for (list) |elem, i| {
+ parsed[i] = try self.parseValue(array_info.child, elem);
+ }
+
+ return parsed;
+ }
+};
+
+test {
+ testing.refAllDecls(@This());
+}
+
+test "simple list" {
+ const source =
+ \\- a
+ \\- b
+ \\- c
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectEqual(yaml.docs.items.len, 1);
+
+ const list = yaml.docs.items[0].list;
+ try testing.expectEqual(list.len, 3);
+
+ try testing.expect(mem.eql(u8, list[0].string, "a"));
+ try testing.expect(mem.eql(u8, list[1].string, "b"));
+ try testing.expect(mem.eql(u8, list[2].string, "c"));
+}
+
+test "simple list typed as array of strings" {
+ const source =
+ \\- a
+ \\- b
+ \\- c
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectEqual(yaml.docs.items.len, 1);
+
+ const arr = try yaml.parse([3][]const u8);
+ try testing.expectEqual(arr.len, 3);
+ try testing.expect(mem.eql(u8, arr[0], "a"));
+ try testing.expect(mem.eql(u8, arr[1], "b"));
+ try testing.expect(mem.eql(u8, arr[2], "c"));
+}
+
+test "simple list typed as array of ints" {
+ const source =
+ \\- 0
+ \\- 1
+ \\- 2
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectEqual(yaml.docs.items.len, 1);
+
+ const arr = try yaml.parse([3]u8);
+ try testing.expectEqual(arr.len, 3);
+ try testing.expectEqual(arr[0], 0);
+ try testing.expectEqual(arr[1], 1);
+ try testing.expectEqual(arr[2], 2);
+}
+
+test "list of mixed sign integer" {
+ const source =
+ \\- 0
+ \\- -1
+ \\- 2
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectEqual(yaml.docs.items.len, 1);
+
+ const arr = try yaml.parse([3]i8);
+ try testing.expectEqual(arr.len, 3);
+ try testing.expectEqual(arr[0], 0);
+ try testing.expectEqual(arr[1], -1);
+ try testing.expectEqual(arr[2], 2);
+}
+
+test "simple map untyped" {
+ const source =
+ \\a: 0
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectEqual(yaml.docs.items.len, 1);
+
+ const map = yaml.docs.items[0].map;
+ try testing.expect(map.contains("a"));
+ try testing.expectEqual(map.get("a").?.int, 0);
+}
+
+test "simple map typed" {
+ const source =
+ \\a: 0
+ \\b: hello there
+ \\c: 'wait, what?'
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ const simple = try yaml.parse(struct { a: usize, b: []const u8, c: []const u8 });
+ try testing.expectEqual(simple.a, 0);
+ try testing.expect(mem.eql(u8, simple.b, "hello there"));
+ try testing.expect(mem.eql(u8, simple.c, "wait, what?"));
+}
+
+test "typed nested structs" {
+ const source =
+ \\a:
+ \\ b: hello there
+ \\ c: 'wait, what?'
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ const simple = try yaml.parse(struct {
+ a: struct {
+ b: []const u8,
+ c: []const u8,
+ },
+ });
+ try testing.expect(mem.eql(u8, simple.a.b, "hello there"));
+ try testing.expect(mem.eql(u8, simple.a.c, "wait, what?"));
+}
+
+test "multidoc typed as a slice of structs" {
+ const source =
+ \\---
+ \\a: 0
+ \\---
+ \\a: 1
+ \\...
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ {
+ const result = try yaml.parse([2]struct { a: usize });
+ try testing.expectEqual(result.len, 2);
+ try testing.expectEqual(result[0].a, 0);
+ try testing.expectEqual(result[1].a, 1);
+ }
+
+ {
+ const result = try yaml.parse([]struct { a: usize });
+ try testing.expectEqual(result.len, 2);
+ try testing.expectEqual(result[0].a, 0);
+ try testing.expectEqual(result[1].a, 1);
+ }
+}
+
+test "multidoc typed as a struct is an error" {
+ const source =
+ \\---
+ \\a: 0
+ \\---
+ \\b: 1
+ \\...
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectError(Yaml.Error.TypeMismatch, yaml.parse(struct { a: usize }));
+ try testing.expectError(Yaml.Error.TypeMismatch, yaml.parse(struct { b: usize }));
+ try testing.expectError(Yaml.Error.TypeMismatch, yaml.parse(struct { a: usize, b: usize }));
+}
+
+test "multidoc typed as a slice of structs with optionals" {
+ const source =
+ \\---
+ \\a: 0
+ \\c: 1.0
+ \\---
+ \\a: 1
+ \\b: different field
+ \\...
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ const result = try yaml.parse([]struct { a: usize, b: ?[]const u8, c: ?f16 });
+ try testing.expectEqual(result.len, 2);
+
+ try testing.expectEqual(result[0].a, 0);
+ try testing.expect(result[0].b == null);
+ try testing.expect(result[0].c != null);
+ try testing.expectEqual(result[0].c.?, 1.0);
+
+ try testing.expectEqual(result[1].a, 1);
+ try testing.expect(result[1].b != null);
+ try testing.expect(mem.eql(u8, result[1].b.?, "different field"));
+ try testing.expect(result[1].c == null);
+}
+
+test "empty yaml can be represented as void" {
+ const source = "";
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+ const result = try yaml.parse(void);
+ try testing.expect(@TypeOf(result) == void);
+}
+
+test "nonempty yaml cannot be represented as void" {
+ const source =
+ \\a: b
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectError(Yaml.Error.TypeMismatch, yaml.parse(void));
+}
+
+test "typed array size mismatch" {
+ const source =
+ \\- 0
+ \\- 0
+ ;
+
+ var yaml = try Yaml.load(testing.allocator, source);
+ defer yaml.deinit();
+
+ try testing.expectError(Yaml.Error.ArraySizeMismatch, yaml.parse([1]usize));
+ try testing.expectError(Yaml.Error.ArraySizeMismatch, yaml.parse([5]usize));
+}
diff --git a/src/main.zig b/src/main.zig
index c592653e24..86995c5b9f 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -290,6 +290,7 @@ const usage_build_generic =
\\ .c C source code (requires LLVM extensions)
\\ .cpp C++ source code (requires LLVM extensions)
\\ Other C++ extensions: .C .cc .cxx
+ \\ .m Objective-C source code (requires LLVM extensions)
\\
\\General Options:
\\ -h, --help Print this help and exit
@@ -378,6 +379,7 @@ const usage_build_generic =
\\ -T[script], --script [script] Use a custom linker script
\\ --version-script [path] Provide a version .map file
\\ --dynamic-linker [path] Set the dynamic interpreter path (usually ld.so)
+ \\ --sysroot [path] Set the system root directory (usually /)
\\ --version [ver] Dynamic library semver
\\ -fsoname[=name] (Linux) Override the default SONAME value
\\ -fno-soname (Linux) Disable emitting a SONAME
@@ -500,7 +502,7 @@ const Emit = union(enum) {
};
fn optionalBoolEnvVar(arena: *Allocator, name: []const u8) !bool {
- if (std.process.getEnvVarOwned(arena, name)) |value| {
+ if (std.process.getEnvVarOwned(arena, name)) |_| {
return true;
} else |err| switch (err) {
error.EnvironmentVariableNotFound => return false,
@@ -603,6 +605,7 @@ fn buildOutputType(
var link_eh_frame_hdr = false;
var link_emit_relocs = false;
var each_lib_rpath: ?bool = null;
+ var sysroot: ?[]const u8 = null;
var libc_paths_file: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIBC");
var machine_code_model: std.builtin.CodeModel = .default;
var runtime_args_start: ?usize = null;
@@ -864,6 +867,10 @@ fn buildOutputType(
if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg});
i += 1;
target_dynamic_linker = args[i];
+ } else if (mem.eql(u8, arg, "--sysroot")) {
+ if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg});
+ i += 1;
+ sysroot = args[i];
} else if (mem.eql(u8, arg, "--libc")) {
if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg});
i += 1;
@@ -1072,7 +1079,7 @@ fn buildOutputType(
.object, .static_library, .shared_library => {
try link_objects.append(arg);
},
- .assembly, .c, .cpp, .h, .ll, .bc => {
+ .assembly, .c, .cpp, .h, .ll, .bc, .m => {
try c_source_files.append(.{
.src_path = arg,
.extra_flags = try arena.dupe([]const u8, extra_cflags.items),
@@ -1135,7 +1142,7 @@ fn buildOutputType(
.positional => {
const file_ext = Compilation.classifyFileExt(mem.spanZ(it.only_arg));
switch (file_ext) {
- .assembly, .c, .cpp, .ll, .bc, .h => try c_source_files.append(.{ .src_path = it.only_arg }),
+ .assembly, .c, .cpp, .ll, .bc, .h, .m => try c_source_files.append(.{ .src_path = it.only_arg }),
.unknown, .shared_library, .object, .static_library => {
try link_objects.append(it.only_arg);
},
@@ -1640,7 +1647,9 @@ fn buildOutputType(
want_native_include_dirs = true;
}
- if (cross_target.isNativeOs() and (system_libs.items.len != 0 or want_native_include_dirs)) {
+ if (sysroot == null and cross_target.isNativeOs() and
+ (system_libs.items.len != 0 or want_native_include_dirs))
+ {
const paths = std.zig.system.NativePaths.detect(arena, target_info) catch |err| {
fatal("unable to detect native system paths: {s}", .{@errorName(err)});
};
@@ -1917,6 +1926,7 @@ fn buildOutputType(
.is_native_os = cross_target.isNativeOs(),
.is_native_abi = cross_target.isNativeAbi(),
.dynamic_linker = target_info.dynamic_linker.get(),
+ .sysroot = sysroot,
.output_mode = output_mode,
.root_pkg = root_pkg,
.emit_bin = emit_bin_loc,
@@ -2565,6 +2575,7 @@ pub fn cmdInit(
args: []const []const u8,
output_mode: std.builtin.OutputMode,
) !void {
+ _ = gpa;
{
var i: usize = 0;
while (i < args.len) : (i += 1) {
@@ -2960,7 +2971,6 @@ const Fmt = struct {
};
pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
- const stderr_file = io.getStdErr();
var color: Color = .auto;
var stdin_flag: bool = false;
var check_flag: bool = false;
@@ -3018,9 +3028,52 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
defer tree.deinit(gpa);
for (tree.errors) |parse_error| {
- try printErrMsgToFile(gpa, parse_error, tree, "<stdin>", stderr_file, color);
+ try printErrMsgToStdErr(gpa, parse_error, tree, "<stdin>", color);
+ }
+ var has_ast_error = false;
+ if (check_ast_flag) {
+ const Module = @import("Module.zig");
+ const AstGen = @import("AstGen.zig");
+
+ var file: Module.Scope.File = .{
+ .status = .never_loaded,
+ .source_loaded = true,
+ .zir_loaded = false,
+ .sub_file_path = "<stdin>",
+ .source = source_code,
+ .stat_size = undefined,
+ .stat_inode = undefined,
+ .stat_mtime = undefined,
+ .tree = tree,
+ .tree_loaded = true,
+ .zir = undefined,
+ .pkg = undefined,
+ .root_decl = null,
+ };
+
+ file.zir = try AstGen.generate(gpa, file.tree);
+ file.zir_loaded = true;
+ defer file.zir.deinit(gpa);
+
+ if (file.zir.hasCompileErrors()) {
+ var arena_instance = std.heap.ArenaAllocator.init(gpa);
+ defer arena_instance.deinit();
+ var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa);
+ defer errors.deinit();
+
+ try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file);
+ const ttyconf: std.debug.TTY.Config = switch (color) {
+ .auto => std.debug.detectTTYConfig(),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+ for (errors.items) |full_err_msg| {
+ full_err_msg.renderToStdErr(ttyconf);
+ }
+ has_ast_error = true;
+ }
}
- if (tree.errors.len != 0) {
+ if (tree.errors.len != 0 or has_ast_error) {
process.exit(1);
}
const formatted = try tree.render(gpa);
@@ -3163,7 +3216,7 @@ fn fmtPathFile(
defer tree.deinit(fmt.gpa);
for (tree.errors) |parse_error| {
- try printErrMsgToFile(fmt.gpa, parse_error, tree, file_path, std.io.getStdErr(), fmt.color);
+ try printErrMsgToStdErr(fmt.gpa, parse_error, tree, file_path, fmt.color);
}
if (tree.errors.len != 0) {
fmt.any_error = true;
@@ -3239,25 +3292,16 @@ fn fmtPathFile(
}
}
-fn printErrMsgToFile(
+fn printErrMsgToStdErr(
gpa: *mem.Allocator,
parse_error: ast.Error,
tree: ast.Tree,
path: []const u8,
- file: fs.File,
color: Color,
) !void {
- const color_on = switch (color) {
- .auto => file.isTty(),
- .on => true,
- .off => false,
- };
const lok_token = parse_error.token;
-
- const token_starts = tree.tokens.items(.start);
- const token_tags = tree.tokens.items(.tag);
- const first_token_start = token_starts[lok_token];
const start_loc = tree.tokenLocation(0, lok_token);
+ const source_line = tree.source[start_loc.line_start..start_loc.line_end];
var text_buf = std.ArrayList(u8).init(gpa);
defer text_buf.deinit();
@@ -3265,26 +3309,24 @@ fn printErrMsgToFile(
try tree.renderError(parse_error, writer);
const text = text_buf.items;
- const stream = file.writer();
- try stream.print("{s}:{d}:{d}: error: {s}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text });
+ const message: Compilation.AllErrors.Message = .{
+ .src = .{
+ .src_path = path,
+ .msg = text,
+ .byte_offset = @intCast(u32, start_loc.line_start),
+ .line = @intCast(u32, start_loc.line),
+ .column = @intCast(u32, start_loc.column),
+ .source_line = source_line,
+ },
+ };
- if (!color_on) return;
+ const ttyconf: std.debug.TTY.Config = switch (color) {
+ .auto => std.debug.detectTTYConfig(),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
- // Print \r and \t as one space each so that column counts line up
- for (tree.source[start_loc.line_start..start_loc.line_end]) |byte| {
- try stream.writeByte(switch (byte) {
- '\r', '\t' => ' ',
- else => byte,
- });
- }
- try stream.writeByte('\n');
- try stream.writeByteNTimes(' ', start_loc.column);
- if (token_tags[lok_token].lexeme()) |lexeme| {
- try stream.writeByteNTimes('~', lexeme.len);
- try stream.writeByte('\n');
- } else {
- try stream.writeAll("^\n");
- }
+ message.renderToStdErr(ttyconf);
}
pub const info_zen =
@@ -3718,7 +3760,6 @@ pub fn cmdAstCheck(
var color: Color = .auto;
var want_output_text = false;
- var have_zig_source_file = false;
var zig_source_file: ?[]const u8 = null;
var i: usize = 0;
@@ -3800,7 +3841,7 @@ pub fn cmdAstCheck(
defer file.tree.deinit(gpa);
for (file.tree.errors) |parse_error| {
- try printErrMsgToFile(gpa, parse_error, file.tree, file.sub_file_path, io.getStdErr(), color);
+ try printErrMsgToStdErr(gpa, parse_error, file.tree, file.sub_file_path, color);
}
if (file.tree.errors.len != 0) {
process.exit(1);
@@ -3921,7 +3962,7 @@ pub fn cmdChangelist(
defer file.tree.deinit(gpa);
for (file.tree.errors) |parse_error| {
- try printErrMsgToFile(gpa, parse_error, file.tree, old_source_file, io.getStdErr(), .auto);
+ try printErrMsgToStdErr(gpa, parse_error, file.tree, old_source_file, .auto);
}
if (file.tree.errors.len != 0) {
process.exit(1);
@@ -3958,7 +3999,7 @@ pub fn cmdChangelist(
defer new_tree.deinit(gpa);
for (new_tree.errors) |parse_error| {
- try printErrMsgToFile(gpa, parse_error, new_tree, new_source_file, io.getStdErr(), .auto);
+ try printErrMsgToStdErr(gpa, parse_error, new_tree, new_source_file, .auto);
}
if (new_tree.errors.len != 0) {
process.exit(1);
diff --git a/src/mingw.zig b/src/mingw.zig
index ca887dd940..42d1ac47db 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -372,11 +372,9 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
try child.spawn();
- const stdout_reader = child.stdout.?.reader();
const stderr_reader = child.stderr.?.reader();
// TODO https://github.com/ziglang/zig/issues/6343
- const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
const term = child.wait() catch |err| {
diff --git a/src/musl.zig b/src/musl.zig
index 4f641a4c7c..fcdc503ccc 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -143,7 +143,6 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const dirname = path.dirname(src_file).?;
const basename = path.basename(src_file);
const noextbasename = basename[0 .. basename.len - std.fs.path.extension(basename).len];
- const before_arch_dir = path.dirname(dirname).?;
const dirbasename = path.basename(dirname);
var is_arch_specific = false;
diff --git a/src/print_env.zig b/src/print_env.zig
index d62e1f62fd..8c44e85e65 100644
--- a/src/print_env.zig
+++ b/src/print_env.zig
@@ -5,6 +5,7 @@ const Allocator = std.mem.Allocator;
const fatal = @import("main.zig").fatal;
pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
+ _ = args;
const self_exe_path = try std.fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
diff --git a/src/print_targets.zig b/src/print_targets.zig
index e24a2294a1..d0a1d5167a 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -17,6 +17,7 @@ pub fn cmdTargets(
stdout: anytype,
native_target: Target,
) !void {
+ _ = args;
var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
};
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 9f47526f13..96cf4f17b7 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -265,6 +265,8 @@ fn MockFunction(comptime Register: type) type {
}
pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void {
+ _ = src;
+ _ = inst;
try self.spilled.append(self.allocator, reg);
}
};
@@ -281,12 +283,6 @@ test "default state" {
};
defer function.deinit();
- var mock_instruction = ir.Inst{
- .tag = .breakpoint,
- .ty = Type.initTag(.void),
- .src = .unneeded,
- };
-
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(!function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
@@ -365,12 +361,6 @@ test "tryAllocRegs" {
};
defer function.deinit();
- var mock_instruction = ir.Inst{
- .tag = .breakpoint,
- .ty = Type.initTag(.void),
- .src = .unneeded,
- };
-
try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, &.{}).?);
// Exceptions
diff --git a/src/stage1.zig b/src/stage1.zig
index 91e7cd8bed..46d70ea335 100644
--- a/src/stage1.zig
+++ b/src/stage1.zig
@@ -407,6 +407,8 @@ export fn stage2_add_link_lib(
symbol_name_ptr: [*c]const u8,
symbol_name_len: usize,
) ?[*:0]const u8 {
+ _ = symbol_name_len;
+ _ = symbol_name_ptr;
const comp = @intToPtr(*Compilation, stage1.userdata);
const lib_name = std.ascii.allocLowerString(comp.gpa, lib_name_ptr[0..lib_name_len]) catch return "out of memory";
const target = comp.getTarget();
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index f83849d13b..3fb0cb55b7 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -6291,6 +6291,11 @@ ZigValue *create_const_str_lit(CodeGen *g, Buf *str) {
return const_val;
}
+ZigValue *create_sentineled_str_lit(CodeGen *g, Buf *str, ZigValue *sentinel) {
+ ZigValue *array_val = create_const_str_lit(g, str)->data.x_ptr.data.ref.pointee;
+ return create_const_slice(g, array_val, 0, buf_len(str), true, sentinel);
+}
+
void init_const_bigint(ZigValue *const_val, ZigType *type, const BigInt *bigint) {
const_val->special = ConstValSpecialStatic;
const_val->type = type;
@@ -6444,12 +6449,12 @@ ZigValue *create_const_type(CodeGen *g, ZigType *type_value) {
}
void init_const_slice(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
- size_t start, size_t len, bool is_const)
+ size_t start, size_t len, bool is_const, ZigValue *sentinel)
{
assert(array_val->type->id == ZigTypeIdArray);
- ZigType *ptr_type = get_pointer_to_type_extra(g, array_val->type->data.array.child_type,
- is_const, false, PtrLenUnknown, 0, 0, 0, false);
+ ZigType *ptr_type = get_pointer_to_type_extra2(g, array_val->type->data.array.child_type,
+ is_const, false, PtrLenUnknown, 0, 0, 0, false, VECTOR_INDEX_NONE, nullptr, sentinel);
const_val->special = ConstValSpecialStatic;
const_val->type = get_slice_type(g, ptr_type);
@@ -6460,9 +6465,9 @@ void init_const_slice(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
init_const_usize(g, const_val->data.x_struct.fields[slice_len_index], len);
}
-ZigValue *create_const_slice(CodeGen *g, ZigValue *array_val, size_t start, size_t len, bool is_const) {
+ZigValue *create_const_slice(CodeGen *g, ZigValue *array_val, size_t start, size_t len, bool is_const, ZigValue *sentinel) {
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
- init_const_slice(g, const_val, array_val, start, len, is_const);
+ init_const_slice(g, const_val, array_val, start, len, is_const, sentinel);
return const_val;
}
diff --git a/src/stage1/analyze.hpp b/src/stage1/analyze.hpp
index fee83ab7d0..5c232bf4b6 100644
--- a/src/stage1/analyze.hpp
+++ b/src/stage1/analyze.hpp
@@ -144,6 +144,7 @@ ScopeExpr *create_expr_scope(CodeGen *g, AstNode *node, Scope *parent);
void init_const_str_lit(CodeGen *g, ZigValue *const_val, Buf *str, bool move_str);
ZigValue *create_const_str_lit(CodeGen *g, Buf *str);
+ZigValue *create_sentineled_str_lit(CodeGen *g, Buf *str, ZigValue *sentinel);
void init_const_bigint(ZigValue *const_val, ZigType *type, const BigInt *bigint);
ZigValue *create_const_bigint(CodeGen *g, ZigType *type, const BigInt *bigint);
@@ -186,8 +187,8 @@ ZigValue *create_const_ptr_array(CodeGen *g, ZigValue *array_val, size_t elem_in
bool is_const, PtrLen ptr_len);
void init_const_slice(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
- size_t start, size_t len, bool is_const);
-ZigValue *create_const_slice(CodeGen *g, ZigValue *array_val, size_t start, size_t len, bool is_const);
+ size_t start, size_t len, bool is_const, ZigValue *sentinel);
+ZigValue *create_const_slice(CodeGen *g, ZigValue *array_val, size_t start, size_t len, bool is_const, ZigValue *sentinel);
void init_const_null(ZigValue *const_val, ZigType *type);
ZigValue *create_const_null(CodeGen *g, ZigType *type);
diff --git a/src/stage1/astgen.cpp b/src/stage1/astgen.cpp
index 118b920f58..b69cd480c6 100644
--- a/src/stage1/astgen.cpp
+++ b/src/stage1/astgen.cpp
@@ -4599,8 +4599,11 @@ static IrInstSrc *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, AstNode
}
case BuiltinFnIdShuffle:
{
+ // Used for the type expr and the mask expr
+ Scope *comptime_scope = create_comptime_scope(ag->codegen, node, scope);
+
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- IrInstSrc *arg0_value = astgen_node(ag, arg0_node, scope);
+ IrInstSrc *arg0_value = astgen_node(ag, arg0_node, comptime_scope);
if (arg0_value == ag->codegen->invalid_inst_src)
return arg0_value;
@@ -4615,7 +4618,7 @@ static IrInstSrc *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, AstNode
return arg2_value;
AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
- IrInstSrc *arg3_value = astgen_node(ag, arg3_node, scope);
+ IrInstSrc *arg3_value = astgen_node(ag, arg3_node, comptime_scope);
if (arg3_value == ag->codegen->invalid_inst_src)
return arg3_value;
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index d0ec853f06..9193391a36 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -1022,7 +1022,7 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) {
Buf *buf_msg = panic_msg_buf(msg_id);
ZigValue *array_val = create_const_str_lit(g, buf_msg)->data.x_ptr.data.ref.pointee;
- init_const_slice(g, val, array_val, 0, buf_len(buf_msg), true);
+ init_const_slice(g, val, array_val, 0, buf_len(buf_msg), true, nullptr);
render_const_val(g, val, "");
render_const_val_global(g, val, "");
@@ -9289,6 +9289,10 @@ static void init(CodeGen *g) {
ZigLLVMSetModulePIELevel(g->module);
}
+ if (g->code_model != CodeModelDefault) {
+ ZigLLVMSetModuleCodeModel(g->module, to_llvm_code_model(g));
+ }
+
const char *target_specific_cpu_args = "";
const char *target_specific_features = "";
@@ -9424,7 +9428,7 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
ZigValue *name_field = this_val->data.x_struct.fields[0];
ZigValue *name_array_val = create_const_str_lit(g, &test_fn_entry->symbol_name)->data.x_ptr.data.ref.pointee;
- init_const_slice(g, name_field, name_array_val, 0, buf_len(&test_fn_entry->symbol_name), true);
+ init_const_slice(g, name_field, name_array_val, 0, buf_len(&test_fn_entry->symbol_name), true, nullptr);
ZigValue *fn_field = this_val->data.x_struct.fields[1];
fn_field->type = fn_type;
@@ -9448,7 +9452,7 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
}
report_errors_and_maybe_exit(g);
- ZigValue *test_fn_slice = create_const_slice(g, test_fn_array, 0, g->test_fns.length, true);
+ ZigValue *test_fn_slice = create_const_slice(g, test_fn_array, 0, g->test_fns.length, true, nullptr);
update_compile_var(g, buf_create_from_str("test_functions"), test_fn_slice);
assert(g->test_runner_package != nullptr);
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 053eb87e62..2200e8380d 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -5190,7 +5190,7 @@ static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* sourc
// undef_array->type = array_type;
// IrInstGen *result = ir_const(ira, source_instr, wanted_type);
- // init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
+ // init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false, nullptr);
// result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
// result->value->type = wanted_type;
// return result;
@@ -5217,7 +5217,7 @@ static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* sourc
undef_array->type = array_type;
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
- init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
+ init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false, nullptr);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
result->value->type = wanted_type;
return result;
@@ -5230,7 +5230,7 @@ static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* sourc
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
init_const_slice(ira->codegen, result->value, array_val,
array_ptr_val->data.x_ptr.data.base_array.elem_index,
- array_type->data.array.len, wanted_const);
+ array_type->data.array.len, wanted_const, nullptr);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
result->value->type = wanted_type;
return result;
@@ -5243,7 +5243,7 @@ static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* sourc
assert(array_ptr_val->type->id == ZigTypeIdPointer);
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
- init_const_slice(ira->codegen, result->value, pointee, 0, array_type->data.array.len, wanted_const);
+ init_const_slice(ira->codegen, result->value, pointee, 0, array_type->data.array.len, wanted_const, nullptr);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
result->value->type = wanted_type;
return result;
@@ -14449,7 +14449,7 @@ static IrInstGen *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstSrcElemP
}
init_const_slice(ira->codegen, array_ptr_val, array_init_val, 0, actual_array_type->data.array.len,
- false);
+ false, nullptr);
array_ptr_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutInfer;
} else {
ir_add_error_node(ira, elem_ptr_instruction->init_array_type_source_node,
@@ -16864,26 +16864,27 @@ static IrInstGen *ir_analyze_instruction_err_name(IrAnalyze *ira, IrInstSrcErrNa
if (type_is_invalid(casted_value->value->type))
return ira->codegen->invalid_inst_gen;
- ZigType *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
- true, false, PtrLenUnknown, 0, 0, 0, false);
- ZigType *str_type = get_slice_type(ira->codegen, u8_ptr_type);
if (instr_is_comptime(casted_value)) {
ZigValue *val = ir_resolve_const(ira, casted_value, UndefBad);
if (val == nullptr)
return ira->codegen->invalid_inst_gen;
ErrorTableEntry *err = casted_value->value->data.x_err_set;
if (!err->cached_error_name_val) {
- ZigValue *array_val = create_const_str_lit(ira->codegen, &err->name)->data.x_ptr.data.ref.pointee;
- err->cached_error_name_val = create_const_slice(ira->codegen, array_val, 0, buf_len(&err->name), true);
+ err->cached_error_name_val = create_sentineled_str_lit(
+ ira->codegen, &err->name,
+ ira->codegen->intern.for_zero_byte());
}
IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
- copy_const_val(ira->codegen, result->value, err->cached_error_name_val);
- result->value->type = str_type;
+ result->value = err->cached_error_name_val;
return result;
}
ira->codegen->generate_error_name_table = true;
+ ZigType *u8_ptr_type = get_pointer_to_type_extra2(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ true, false, PtrLenUnknown, 0, 0, 0, false,
+ VECTOR_INDEX_NONE, nullptr, ira->codegen->intern.for_zero_byte());
+ ZigType *str_type = get_slice_type(ira->codegen, u8_ptr_type);
return ir_build_err_name_gen(ira, &instruction->base.base, value, str_type);
}
@@ -16898,8 +16899,9 @@ static IrInstGen *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrInstSrc
if (target_type->id == ZigTypeIdEnumLiteral) {
IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
Buf *field_name = target->value->data.x_enum_literal;
- ZigValue *array_val = create_const_str_lit(ira->codegen, field_name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, result->value, array_val, 0, buf_len(field_name), true);
+ result->value = create_sentineled_str_lit(
+ ira->codegen, field_name,
+ ira->codegen->intern.for_zero_byte());
return result;
}
@@ -16918,9 +16920,10 @@ static IrInstGen *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrInstSrc
if (can_fold_enum_type(target_type)) {
TypeEnumField *only_field = &target_type->data.enumeration.fields[0];
- ZigValue *array_val = create_const_str_lit(ira->codegen, only_field->name)->data.x_ptr.data.ref.pointee;
IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
- init_const_slice(ira->codegen, result->value, array_val, 0, buf_len(only_field->name), true);
+ result->value = create_sentineled_str_lit(
+ ira->codegen, only_field->name,
+ ira->codegen->intern.for_zero_byte());
return result;
}
@@ -16936,16 +16939,17 @@ static IrInstGen *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrInstSrc
buf_sprintf("no tag by value %s", buf_ptr(int_buf)));
return ira->codegen->invalid_inst_gen;
}
- ZigValue *array_val = create_const_str_lit(ira->codegen, field->name)->data.x_ptr.data.ref.pointee;
IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
- init_const_slice(ira->codegen, result->value, array_val, 0, buf_len(field->name), true);
+ result->value = create_sentineled_str_lit(
+ ira->codegen, field->name,
+ ira->codegen->intern.for_zero_byte());
return result;
}
- ZigType *u8_ptr_type = get_pointer_to_type_extra(
+ ZigType *u8_ptr_type = get_pointer_to_type_extra2(
ira->codegen, ira->codegen->builtin_types.entry_u8,
- true, false, PtrLenUnknown,
- 0, 0, 0, false);
+ true, false, PtrLenUnknown, 0, 0, 0, false,
+ VECTOR_INDEX_NONE, nullptr, ira->codegen->intern.for_zero_byte());
ZigType *result_type = get_slice_type(ira->codegen, u8_ptr_type);
return ir_build_tag_name_gen(ira, &instruction->base.base, target, result_type);
}
@@ -17249,7 +17253,7 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
declaration_array->type = get_array_type(ira->codegen, type_info_declaration_type, declaration_count, nullptr);
declaration_array->data.x_array.special = ConstArraySpecialNone;
declaration_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(declaration_count);
- init_const_slice(ira->codegen, out_val, declaration_array, 0, declaration_count, false);
+ init_const_slice(ira->codegen, out_val, declaration_array, 0, declaration_count, false, nullptr);
// Loop through the declarations and generate info.
decl_it = decls_scope->decl_table.entry_iterator();
@@ -17272,7 +17276,7 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
ZigValue **inner_fields = alloc_const_vals_ptrs(ira->codegen, 3);
ZigValue *name = create_const_str_lit(ira->codegen, curr_entry->key)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(curr_entry->key), true);
+ init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(curr_entry->key), true, nullptr);
inner_fields[1]->special = ConstValSpecialStatic;
inner_fields[1]->type = ira->codegen->builtin_types.entry_bool;
inner_fields[1]->data.x_bool = curr_entry->value->visib_mod == VisibModPub;
@@ -17368,7 +17372,7 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
if (fn_node->is_extern && fn_node->lib_name != nullptr && buf_len(fn_node->lib_name) > 0) {
ZigValue *slice_val = ira->codegen->pass1_arena->create<ZigValue>();
ZigValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, slice_val, lib_name, 0, buf_len(fn_node->lib_name), true);
+ init_const_slice(ira->codegen, slice_val, lib_name, 0, buf_len(fn_node->lib_name), true, nullptr);
set_optional_payload(fn_decl_fields[5], slice_val);
} else {
set_optional_payload(fn_decl_fields[5], nullptr);
@@ -17388,14 +17392,14 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
fn_arg_name_array->data.x_array.special = ConstArraySpecialNone;
fn_arg_name_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(fn_arg_count);
- init_const_slice(ira->codegen, fn_decl_fields[7], fn_arg_name_array, 0, fn_arg_count, false);
+ init_const_slice(ira->codegen, fn_decl_fields[7], fn_arg_name_array, 0, fn_arg_count, false, nullptr);
for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
ZigVar *arg_var = fn_entry->variable_list.at(fn_arg_index);
ZigValue *fn_arg_name_val = &fn_arg_name_array->data.x_array.data.s_none.elements[fn_arg_index];
ZigValue *arg_name = create_const_str_lit(ira->codegen,
buf_create_from_str(arg_var->name))->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, fn_arg_name_val, arg_name, 0, strlen(arg_var->name), true);
+ init_const_slice(ira->codegen, fn_arg_name_val, arg_name, 0, strlen(arg_var->name), true, nullptr);
fn_arg_name_val->parent.id = ConstParentIdArray;
fn_arg_name_val->parent.data.p_array.array_val = fn_arg_name_array;
fn_arg_name_val->parent.data.p_array.elem_index = fn_arg_index;
@@ -17531,7 +17535,7 @@ static void make_enum_field_val(IrAnalyze *ira, ZigValue *enum_field_val, TypeEn
inner_fields[1]->type = ira->codegen->builtin_types.entry_num_lit_int;
ZigValue *name = create_const_str_lit(ira->codegen, enum_field->name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(enum_field->name), true);
+ init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(enum_field->name), true, nullptr);
bigint_init_bigint(&inner_fields[1]->data.x_bigint, &enum_field->value);
@@ -17732,7 +17736,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
enum_field_array->data.x_array.special = ConstArraySpecialNone;
enum_field_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(enum_field_count);
- init_const_slice(ira->codegen, fields[2], enum_field_array, 0, enum_field_count, false);
+ init_const_slice(ira->codegen, fields[2], enum_field_array, 0, enum_field_count, false, nullptr);
for (uint32_t enum_field_index = 0; enum_field_index < enum_field_count; enum_field_index++)
{
@@ -17785,7 +17789,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
error_array->data.x_array.special = ConstArraySpecialNone;
error_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(error_count);
- init_const_slice(ira->codegen, slice_val, error_array, 0, error_count, false);
+ init_const_slice(ira->codegen, slice_val, error_array, 0, error_count, false, nullptr);
for (uint32_t error_index = 0; error_index < error_count; error_index++) {
ErrorTableEntry *error = type_entry->data.error_set.errors[error_index];
ZigValue *error_val = &error_array->data.x_array.data.s_none.elements[error_index];
@@ -17800,7 +17804,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
name = error->cached_error_name_val;
if (name == nullptr)
name = create_const_str_lit(ira->codegen, &error->name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(&error->name), true);
+ init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(&error->name), true, nullptr);
error_val->data.x_struct.fields = inner_fields;
error_val->parent.id = ConstParentIdArray;
@@ -17881,7 +17885,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
union_field_array->data.x_array.special = ConstArraySpecialNone;
union_field_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(union_field_count);
- init_const_slice(ira->codegen, fields[2], union_field_array, 0, union_field_count, false);
+ init_const_slice(ira->codegen, fields[2], union_field_array, 0, union_field_count, false, nullptr);
for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) {
TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index];
@@ -17902,7 +17906,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
bigint_init_unsigned(&inner_fields[2]->data.x_bigint, union_field->align);
ZigValue *name = create_const_str_lit(ira->codegen, union_field->name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(union_field->name), true);
+ init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(union_field->name), true, nullptr);
union_field_val->data.x_struct.fields = inner_fields;
union_field_val->parent.id = ConstParentIdArray;
@@ -17958,7 +17962,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
struct_field_array->data.x_array.special = ConstArraySpecialNone;
struct_field_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(struct_field_count);
- init_const_slice(ira->codegen, fields[1], struct_field_array, 0, struct_field_count, false);
+ init_const_slice(ira->codegen, fields[1], struct_field_array, 0, struct_field_count, false, nullptr);
for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
TypeStructField *struct_field = type_entry->data.structure.fields[struct_field_index];
@@ -17994,7 +17998,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
bigint_init_unsigned(&inner_fields[4]->data.x_bigint, struct_field->align);
ZigValue *name = create_const_str_lit(ira->codegen, struct_field->name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(struct_field->name), true);
+ init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(struct_field->name), true, nullptr);
struct_field_val->data.x_struct.fields = inner_fields;
struct_field_val->parent.id = ConstParentIdArray;
@@ -18074,7 +18078,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
fn_arg_array->data.x_array.special = ConstArraySpecialNone;
fn_arg_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(fn_arg_count);
- init_const_slice(ira->codegen, fields[5], fn_arg_array, 0, fn_arg_count, false);
+ init_const_slice(ira->codegen, fields[5], fn_arg_array, 0, fn_arg_count, false, nullptr);
for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index];
@@ -24150,7 +24154,7 @@ static IrInstGen *ir_analyze_instruction_src(IrAnalyze *ira, IrInstSrcSrc *instr
RootStruct *root_struct = import->data.structure.root_struct;
Buf *path = root_struct->path;
ZigValue *file_name = create_const_str_lit(ira->codegen, path)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, fields[0], file_name, 0, buf_len(path), true);
+ init_const_slice(ira->codegen, fields[0], file_name, 0, buf_len(path), true, nullptr);
fields[0]->type = u8_slice;
// fn_name: [:0]const u8
@@ -24158,7 +24162,7 @@ static IrInstGen *ir_analyze_instruction_src(IrAnalyze *ira, IrInstSrcSrc *instr
fields[1]->special = ConstValSpecialStatic;
ZigValue *fn_name = create_const_str_lit(ira->codegen, &fn_entry->symbol_name)->data.x_ptr.data.ref.pointee;
- init_const_slice(ira->codegen, fields[1], fn_name, 0, buf_len(&fn_entry->symbol_name), true);
+ init_const_slice(ira->codegen, fields[1], fn_name, 0, buf_len(&fn_entry->symbol_name), true, nullptr);
fields[1]->type = u8_slice;
diff --git a/src/test.zig b/src/test.zig
index a7f11d93df..1d2a552662 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -70,6 +70,8 @@ const ErrorMsg = union(enum) {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
+ _ = fmt;
+ _ = options;
switch (self) {
.src => |src| {
return writer.print("{s}:{d}:{d}: {s}: {s}", .{
@@ -592,6 +594,7 @@ pub const TestContext = struct {
thread_pool: *ThreadPool,
global_cache_directory: Compilation.Directory,
) !void {
+ _ = self;
const target_info = try std.zig.system.NativeTargetInfo.detect(allocator, case.target);
const target = target_info.target;
diff --git a/src/tracy.zig b/src/tracy.zig
index 6f56a87ce6..9ea15d721b 100644
--- a/src/tracy.zig
+++ b/src/tracy.zig
@@ -28,7 +28,9 @@ pub const ___tracy_c_zone_context = extern struct {
};
pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
- pub fn end(self: Ctx) void {}
+ pub fn end(self: Ctx) void {
+ _ = self;
+ }
};
pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx {
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 4b07618391..93ad4a421b 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -151,6 +151,12 @@ const Scope = struct {
return true;
return scope.base.parent.?.contains(name);
}
+
+ fn discardVariable(scope: *Block, c: *Context, name: []const u8) Error!void {
+ const name_node = try Tag.identifier.create(c.arena, name);
+ const discard = try Tag.discard.create(c.arena, name_node);
+ try scope.statements.append(discard);
+ }
};
const Root = struct {
@@ -206,6 +212,7 @@ const Scope = struct {
}
fn findBlockReturnType(inner: *Scope, c: *Context) clang.QualType {
+ _ = c;
var scope = inner;
while (true) {
switch (scope.id) {
@@ -601,7 +608,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
var scope = &block_scope.base;
var param_id: c_uint = 0;
- for (proto_node.data.params) |*param, i| {
+ for (proto_node.data.params) |*param| {
const param_name = param.name orelse {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
@@ -624,6 +631,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const redecl_node = try Tag.arg_redecl.create(c.arena, .{ .actual = mangled_param_name, .mangled = arg_name });
try block_scope.statements.append(redecl_node);
}
+ try block_scope.discardVariable(c, mangled_param_name);
param_id += 1;
}
@@ -785,7 +793,7 @@ const builtin_typedef_map = std.ComptimeStringMap([]const u8, .{
});
fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNameDecl) Error!void {
- if (c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl()))) |name|
+ if (c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl()))) |_|
return; // Avoid processing this decl twice
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
@@ -826,6 +834,7 @@ fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNa
try addTopLevelDecl(c, name, node);
} else {
try scope.appendNode(node);
+ try bs.discardVariable(c, name);
}
}
@@ -935,7 +944,7 @@ fn hasFlexibleArrayField(c: *Context, record_def: *const clang.RecordDecl) bool
}
fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordDecl) Error!void {
- if (c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl()))) |name|
+ if (c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl()))) |_|
return; // Avoid processing this decl twice
const record_loc = record_decl.getLocation();
const toplevel = scope.id == .root;
@@ -1076,11 +1085,12 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
try c.alias_list.append(.{ .alias = bare_name, .name = name });
} else {
try scope.appendNode(Node.initPayload(&payload.base));
+ try bs.discardVariable(c, name);
}
}
fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) Error!void {
- if (c.decl_table.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |name|
+ if (c.decl_table.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |_|
return; // Avoid processing this decl twice
const enum_loc = enum_decl.getLocation();
const toplevel = scope.id == .root;
@@ -1099,27 +1109,39 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
}
name = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name});
}
- if (!toplevel) _ = try bs.makeMangledName(c, name);
+ if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(enum_decl.getCanonicalDecl()), name);
- const is_pub = toplevel and !is_unnamed;
- var redecls = std.ArrayList(Tag.enum_redecl.Data()).init(c.gpa);
- defer redecls.deinit();
-
- const init_node = if (enum_decl.getDefinition()) |enum_def| blk: {
- var pure_enum = true;
+ const enum_type_node = if (enum_decl.getDefinition()) |enum_def| blk: {
var it = enum_def.enumerator_begin();
- var end_it = enum_def.enumerator_end();
+ const end_it = enum_def.enumerator_end();
while (it.neq(end_it)) : (it = it.next()) {
const enum_const = it.deref();
- if (enum_const.getInitExpr()) |_| {
- pure_enum = false;
- break;
+ var enum_val_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_const).getName_bytes_begin());
+ if (!toplevel) {
+ enum_val_name = try bs.makeMangledName(c, enum_val_name);
}
- }
- var fields = std.ArrayList(ast.Payload.Enum.Field).init(c.gpa);
- defer fields.deinit();
+ const enum_const_qt = @ptrCast(*const clang.ValueDecl, enum_const).getType();
+ const enum_const_loc = @ptrCast(*const clang.Decl, enum_const).getLocation();
+ const enum_const_type_node: ?Node = transQualType(c, scope, enum_const_qt, enum_const_loc) catch |err| switch (err) {
+ error.UnsupportedType => null,
+ else => |e| return e,
+ };
+
+ const enum_const_def = try Tag.enum_constant.create(c.arena, .{
+ .name = enum_val_name,
+ .is_public = toplevel,
+ .type = enum_const_type_node,
+ .value = try transCreateNodeAPInt(c, enum_const.getInitVal()),
+ });
+ if (toplevel)
+ try addTopLevelDecl(c, enum_val_name, enum_const_def)
+ else {
+ try scope.appendNode(enum_const_def);
+ try bs.discardVariable(c, enum_val_name);
+ }
+ }
const int_type = enum_decl.getIntegerType();
// The underlying type may be null in case of forward-declared enum
@@ -1127,61 +1149,27 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
// default to the usual integer type used for all the enums.
// default to c_int since msvc and gcc default to different types
- const init_arg_expr = if (int_type.ptr != null)
+ break :blk if (int_type.ptr != null)
transQualType(c, scope, int_type, enum_loc) catch |err| switch (err) {
error.UnsupportedType => {
- return failDecl(c, enum_loc, name, "unable to translate enum tag type", .{});
+ return failDecl(c, enum_loc, name, "unable to translate enum integer type", .{});
},
else => |e| return e,
}
else
try Tag.type.create(c.arena, "c_int");
-
- it = enum_def.enumerator_begin();
- end_it = enum_def.enumerator_end();
- while (it.neq(end_it)) : (it = it.next()) {
- const enum_const = it.deref();
- const enum_val_name = try c.str(@ptrCast(*const clang.NamedDecl, enum_const).getName_bytes_begin());
-
- const field_name = if (!is_unnamed and mem.startsWith(u8, enum_val_name, bare_name))
- enum_val_name[bare_name.len..]
- else
- enum_val_name;
-
- const int_node = if (!pure_enum)
- try transCreateNodeAPInt(c, enum_const.getInitVal())
- else
- null;
-
- try fields.append(.{
- .name = field_name,
- .value = int_node,
- });
-
- // In C each enum value is in the global namespace. So we put them there too.
- // At this point we can rely on the enum emitting successfully.
- try redecls.append(.{
- .enum_val_name = enum_val_name,
- .field_name = field_name,
- .enum_name = name,
- });
- }
-
- break :blk try Tag.@"enum".create(c.arena, .{
- .int_type = init_arg_expr,
- .fields = try c.arena.dupe(ast.Payload.Enum.Field, fields.items),
- });
} else blk: {
try c.opaque_demotes.put(c.gpa, @ptrToInt(enum_decl.getCanonicalDecl()), {});
break :blk Tag.opaque_literal.init();
};
+ const is_pub = toplevel and !is_unnamed;
const payload = try c.arena.create(ast.Payload.SimpleVarDecl);
payload.* = .{
.base = .{ .tag = ([2]Tag{ .var_simple, .pub_var_simple })[@boolToInt(is_pub)] },
.data = .{
+ .init = enum_type_node,
.name = name,
- .init = init_node,
},
};
@@ -1191,18 +1179,7 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
try c.alias_list.append(.{ .alias = bare_name, .name = name });
} else {
try scope.appendNode(Node.initPayload(&payload.base));
- }
-
- for (redecls.items) |redecl| {
- if (toplevel) {
- try addTopLevelDecl(c, redecl.field_name, try Tag.pub_enum_redecl.create(c.arena, redecl));
- } else {
- try scope.appendNode(try Tag.enum_redecl.create(c.arena, .{
- .enum_val_name = try bs.makeMangledName(c, redecl.enum_val_name),
- .field_name = redecl.field_name,
- .enum_name = redecl.enum_name,
- }));
- }
+ try bs.discardVariable(c, name);
}
}
@@ -1312,6 +1289,7 @@ fn transConvertVectorExpr(
source_loc: clang.SourceLocation,
expr: *const clang.ConvertVectorExpr,
) TransError!Node {
+ _ = source_loc;
const base_stmt = @ptrCast(*const clang.Stmt, expr);
var block_scope = try Scope.Block.init(c, scope, true);
@@ -1321,7 +1299,6 @@ fn transConvertVectorExpr(
const src_type = qualTypeCanon(src_expr.getType());
const src_vector_ty = @ptrCast(*const clang.VectorType, src_type);
const src_element_qt = src_vector_ty.getElementType();
- const src_element_type_node = try transQualType(c, &block_scope.base, src_element_qt, base_stmt.getBeginLoc());
const src_expr_node = try transExpr(c, &block_scope.base, src_expr, .used);
@@ -1387,11 +1364,10 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
init.* = converted_index;
}
- const mask_init = try Tag.array_init.create(c.arena, .{
+ return Tag.array_init.create(c.arena, .{
.cond = mask_type,
.cases = init_list,
});
- return Tag.@"comptime".create(c.arena, mask_init);
}
/// @typeInfo(@TypeOf(vec_node)).Vector.<field>
@@ -1434,6 +1410,7 @@ fn transSimpleOffsetOfExpr(
scope: *Scope,
expr: *const clang.OffsetOfExpr,
) TransError!Node {
+ _ = scope;
assert(expr.getNumComponents() == 1);
const component = expr.getComponent(0);
if (component.getKind() == .Field) {
@@ -1800,6 +1777,7 @@ fn transDeclStmtOne(
node = try Tag.static_local_var.create(c.arena, .{ .name = mangled_name, .init = node });
}
try block_scope.statements.append(node);
+ try block_scope.discardVariable(c, mangled_name);
const cleanup_attr = var_decl.getCleanupAttribute();
if (cleanup_attr) |fn_decl| {
@@ -2096,8 +2074,7 @@ fn finishBoolExpr(
},
.Enum => {
// node != 0
- const int_val = try Tag.enum_to_int.create(c.arena, node);
- return Tag.not_equal.create(c.arena, .{ .lhs = int_val, .rhs = Tag.zero_literal.init() });
+ return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() });
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
@@ -2270,6 +2247,7 @@ fn transStringLiteralInitializer(
/// both operands resolve to addresses. The C standard requires that both operands
/// point to elements of the same array object, but we do not verify that here.
fn cIsPointerDiffExpr(c: *Context, stmt: *const clang.BinaryOperator) bool {
+ _ = c;
const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS());
const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS());
return stmt.getOpcode() == .Sub and
@@ -2309,21 +2287,22 @@ fn transCCast(
if (dst_type.eq(src_type)) return expr;
if (qualTypeIsPtr(dst_type) and qualTypeIsPtr(src_type))
return transCPtrCast(c, scope, loc, dst_type, src_type, expr);
+ if (cIsEnum(dst_type)) return transCCast(c, scope, loc, cIntTypeForEnum(dst_type), src_type, expr);
+ if (cIsEnum(src_type)) return transCCast(c, scope, loc, dst_type, cIntTypeForEnum(src_type), expr);
const dst_node = try transQualType(c, scope, dst_type, loc);
- if (cIsInteger(dst_type) and (cIsInteger(src_type) or cIsEnum(src_type))) {
+ if (cIsInteger(dst_type) and cIsInteger(src_type)) {
// 1. If src_type is an enum, determine the underlying signed int type
// 2. Extend or truncate without changing signed-ness.
// 3. Bit-cast to correct signed-ness
- const src_int_type = if (cIsInteger(src_type)) src_type else cIntTypeForEnum(src_type);
- const src_type_is_signed = cIsSignedInteger(src_int_type);
- var src_int_expr = if (cIsInteger(src_type)) expr else try Tag.enum_to_int.create(c.arena, expr);
+ const src_type_is_signed = cIsSignedInteger(src_type);
+ var src_int_expr = expr;
if (isBoolRes(src_int_expr)) {
src_int_expr = try Tag.bool_to_int.create(c.arena, src_int_expr);
}
- switch (cIntTypeCmp(dst_type, src_int_type)) {
+ switch (cIntTypeCmp(dst_type, src_type)) {
.lt => {
// @truncate(SameSignSmallerInt, src_int_expr)
const ty_node = try transQualTypeIntWidthOf(c, dst_type, src_type_is_signed);
@@ -2376,14 +2355,6 @@ fn transCCast(
const bool_to_int = try Tag.bool_to_int.create(c.arena, expr);
return Tag.as.create(c.arena, .{ .lhs = dst_node, .rhs = bool_to_int });
}
- if (cIsEnum(dst_type)) {
- // import("std").meta.cast(dest_type, val)
- return Tag.helpers_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
- }
- if (cIsEnum(src_type) and !cIsEnum(dst_type)) {
- // @enumToInt(val)
- return Tag.enum_to_int.create(c.arena, expr);
- }
// @as(dest_type, val)
return Tag.as.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
@@ -2573,6 +2544,7 @@ fn transInitListExprVector(
expr: *const clang.InitListExpr,
ty: *const clang.Type,
) TransError!Node {
+ _ = ty;
const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
const vector_type = try transQualType(c, scope, qt, loc);
const init_count = expr.getNumInits();
@@ -2722,6 +2694,7 @@ fn transImplicitValueInitExpr(
expr: *const clang.Expr,
used: ResultUsed,
) TransError!Node {
+ _ = used;
const source_loc = expr.getBeginLoc();
const qt = getExprQualType(c, expr);
const ty = qt.getTypePtr();
@@ -3408,6 +3381,7 @@ fn transUnaryExprOrTypeTraitExpr(
stmt: *const clang.UnaryExprOrTypeTraitExpr,
result_used: ResultUsed,
) TransError!Node {
+ _ = result_used;
const loc = stmt.getBeginLoc();
const type_node = try transQualType(c, scope, stmt.getTypeOfArgument(), loc);
@@ -3802,7 +3776,6 @@ fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang
const res_is_bool = qualTypeIsBoolean(qt);
const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt);
const cond_expr = casted_stmt.getCond();
- const true_expr = casted_stmt.getTrueExpr();
const false_expr = casted_stmt.getFalseExpr();
// c: (cond_expr)?:(false_expr)
@@ -3895,6 +3868,7 @@ fn maybeSuppressResult(
used: ResultUsed,
result: Node,
) TransError!Node {
+ _ = scope;
if (used == .used) return result;
return Tag.discard.create(c.arena, result);
}
@@ -4336,12 +4310,10 @@ fn transCreateNodeNumber(c: *Context, num: anytype, num_kind: enum { int, float
}
fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias: *ast.Payload.Func) !Node {
- const scope = &c.global_scope.base;
-
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
- for (proto_alias.data.params) |param, i| {
+ for (proto_alias.data.params) |param| {
const param_name = param.name orelse
try std.fmt.allocPrint(c.arena, "arg_{d}", .{c.getMangle()});
@@ -4943,6 +4915,10 @@ fn transMacroDefine(c: *Context, m: *MacroCtx) ParseError!void {
const scope = &c.global_scope.base;
const init_node = try parseCExpr(c, m, scope);
+ if (init_node.castTag(.identifier)) |ident_node| {
+ if (mem.eql(u8, "_", ident_node.data))
+ return m.fail(c, "unable to translate C expr: illegal identifier _", .{});
+ }
const last = m.next().?;
if (last != .Eof and last != .Nl)
return m.fail(c, "unable to translate C expr: unexpected token .{s}", .{@tagName(last)});
@@ -4973,7 +4949,7 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
.name = mangled_name,
.type = Tag.@"anytype".init(),
});
-
+ try block_scope.discardVariable(c, mangled_name);
if (m.peek().? != .Comma) break;
_ = m.next();
}
@@ -5657,6 +5633,7 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_
}
fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
+ _ = scope;
const KwCounter = struct {
double: u8 = 0,
long: u8 = 0,
@@ -5758,6 +5735,7 @@ fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
}
fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, scope: *Scope, node: Node) ParseError!Node {
+ _ = scope;
switch (m.next().?) {
.Asterisk => {
// last token of `node`
@@ -5969,7 +5947,6 @@ fn getContainer(c: *Context, node: Node) ?Node {
switch (node.tag()) {
.@"union",
.@"struct",
- .@"enum",
.address_of,
.bit_not,
.not,
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 59e84c6ba0..70c8c6559f 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -64,8 +64,6 @@ pub const Node = extern union {
static_local_var,
func,
warning,
- /// All enums are non-exhaustive
- @"enum",
@"struct",
@"union",
@"comptime",
@@ -146,10 +144,6 @@ pub const Node = extern union {
float_to_int,
/// @intToFloat(lhs, rhs)
int_to_float,
- /// @intToEnum(lhs, rhs)
- int_to_enum,
- /// @enumToInt(operand)
- enum_to_int,
/// @intToPtr(lhs, rhs)
int_to_ptr,
/// @ptrToInt(operand)
@@ -215,9 +209,8 @@ pub const Node = extern union {
var_simple,
/// pub const name = init;
pub_var_simple,
- /// pub const enum_field_name = @enumToInt(enum_name.field_name);
- pub_enum_redecl,
- enum_redecl,
+ /// pub? const name (: type)? = value
+ enum_constant,
/// pub inline fn name(params) return_type body
pub_inline_fn,
@@ -266,7 +259,6 @@ pub const Node = extern union {
.unwrap,
.deref,
.ptr_to_int,
- .enum_to_int,
.empty_array,
.while_true,
.if_not_break,
@@ -324,7 +316,6 @@ pub const Node = extern union {
.float_cast,
.float_to_int,
.int_to_float,
- .int_to_enum,
.int_to_ptr,
.array_cat,
.ellipsis3,
@@ -357,7 +348,6 @@ pub const Node = extern union {
.call => Payload.Call,
.var_decl => Payload.VarDecl,
.func => Payload.Func,
- .@"enum" => Payload.Enum,
.@"struct", .@"union" => Payload.Record,
.tuple => Payload.TupleInit,
.container_init => Payload.ContainerInit,
@@ -369,7 +359,7 @@ pub const Node = extern union {
.arg_redecl, .alias, .fail_decl => Payload.ArgRedecl,
.log2_int_type => Payload.Log2IntType,
.var_simple, .pub_var_simple, .static_local_var => Payload.SimpleVarDecl,
- .pub_enum_redecl, .enum_redecl => Payload.EnumRedecl,
+ .enum_constant => Payload.EnumConstant,
.array_filler => Payload.ArrayFiller,
.pub_inline_fn => Payload.PubInlineFn,
.field_access => Payload.FieldAccess,
@@ -554,19 +544,6 @@ pub const Payload = struct {
type: Node,
};
- pub const Enum = struct {
- base: Payload,
- data: struct {
- int_type: Node,
- fields: []Field,
- },
-
- pub const Field = struct {
- name: []const u8,
- value: ?Node,
- };
- };
-
pub const Record = struct {
base: Payload,
data: struct {
@@ -658,12 +635,13 @@ pub const Payload = struct {
},
};
- pub const EnumRedecl = struct {
+ pub const EnumConstant = struct {
base: Payload,
data: struct {
- enum_val_name: []const u8,
- field_name: []const u8,
- enum_name: []const u8,
+ name: []const u8,
+ is_public: bool,
+ type: ?Node,
+ value: Node,
},
};
@@ -1307,14 +1285,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const payload = node.castTag(.int_to_float).?.data;
return renderBuiltinCall(c, "@intToFloat", &.{ payload.lhs, payload.rhs });
},
- .int_to_enum => {
- const payload = node.castTag(.int_to_enum).?.data;
- return renderBuiltinCall(c, "@intToEnum", &.{ payload.lhs, payload.rhs });
- },
- .enum_to_int => {
- const payload = node.castTag(.enum_to_int).?.data;
- return renderBuiltinCall(c, "@enumToInt", &.{payload});
- },
.int_to_ptr => {
const payload = node.castTag(.int_to_ptr).?.data;
return renderBuiltinCall(c, "@intToPtr", &.{ payload.lhs, payload.rhs });
@@ -1814,91 +1784,28 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
return renderFieldAccess(c, lhs, payload.field_name);
},
.@"struct", .@"union" => return renderRecord(c, node),
- .@"enum" => {
- const payload = node.castTag(.@"enum").?.data;
- _ = try c.addToken(.keyword_extern, "extern");
- const enum_tok = try c.addToken(.keyword_enum, "enum");
- _ = try c.addToken(.l_paren, "(");
- const arg_expr = try renderNode(c, payload.int_type);
- _ = try c.addToken(.r_paren, ")");
- _ = try c.addToken(.l_brace, "{");
- const members = try c.gpa.alloc(NodeIndex, payload.fields.len + 1);
- defer c.gpa.free(members);
- members[0] = 0;
-
- for (payload.fields) |field, i| {
- const name_tok = try c.addIdentifier(field.name);
- const value_expr = if (field.value) |some| blk: {
- _ = try c.addToken(.equal, "=");
- break :blk try renderNode(c, some);
- } else 0;
-
- members[i] = try c.addNode(.{
- .tag = .container_field_init,
- .main_token = name_tok,
- .data = .{
- .lhs = 0,
- .rhs = value_expr,
- },
- });
- _ = try c.addToken(.comma, ",");
- }
- // make non-exhaustive
- members[payload.fields.len] = try c.addNode(.{
- .tag = .container_field_init,
- .main_token = try c.addIdentifier("_"),
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- });
- _ = try c.addToken(.comma, ",");
- _ = try c.addToken(.r_brace, "}");
+ .enum_constant => {
+ const payload = node.castTag(.enum_constant).?.data;
- const span = try c.listToSpan(members);
- return c.addNode(.{
- .tag = .container_decl_arg_trailing,
- .main_token = enum_tok,
- .data = .{
- .lhs = arg_expr,
- .rhs = try c.addExtra(NodeSubRange{
- .start = span.start,
- .end = span.end,
- }),
- },
- });
- },
- .pub_enum_redecl, .enum_redecl => {
- const payload = @fieldParentPtr(Payload.EnumRedecl, "base", node.ptr_otherwise).data;
- if (node.tag() == .pub_enum_redecl) _ = try c.addToken(.keyword_pub, "pub");
+ if (payload.is_public) _ = try c.addToken(.keyword_pub, "pub");
const const_tok = try c.addToken(.keyword_const, "const");
- _ = try c.addIdentifier(payload.enum_val_name);
+ _ = try c.addIdentifier(payload.name);
+
+ const type_node = if (payload.type) |enum_const_type| blk: {
+ _ = try c.addToken(.colon, ":");
+ break :blk try renderNode(c, enum_const_type);
+ } else 0;
+
_ = try c.addToken(.equal, "=");
- const enum_to_int_tok = try c.addToken(.builtin, "@enumToInt");
- _ = try c.addToken(.l_paren, "(");
- const enum_name = try c.addNode(.{
- .tag = .identifier,
- .main_token = try c.addIdentifier(payload.enum_name),
- .data = undefined,
- });
- const field_access = try renderFieldAccess(c, enum_name, payload.field_name);
- const init_node = try c.addNode(.{
- .tag = .builtin_call_two,
- .main_token = enum_to_int_tok,
- .data = .{
- .lhs = field_access,
- .rhs = 0,
- },
- });
- _ = try c.addToken(.r_paren, ")");
+ const init_node = try renderNode(c, payload.value);
_ = try c.addToken(.semicolon, ";");
return c.addNode(.{
.tag = .simple_var_decl,
.main_token = const_tok,
.data = .{
- .lhs = 0,
+ .lhs = type_node,
.rhs = init_node,
},
});
@@ -2210,7 +2117,7 @@ fn renderNullSentinelArrayType(c: *Context, len: usize, elem_type: Node) !NodeIn
fn addSemicolonIfNeeded(c: *Context, node: Node) !void {
switch (node.tag()) {
.warning => unreachable,
- .var_decl, .var_simple, .arg_redecl, .alias, .enum_redecl, .block, .empty_block, .block_single, .@"switch" => {},
+ .var_decl, .var_simple, .arg_redecl, .alias, .block, .empty_block, .block_single, .@"switch" => {},
.while_true => {
const payload = node.castTag(.while_true).?.data;
return addSemicolonIfNotBlock(c, payload);
@@ -2258,13 +2165,11 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.float_cast,
.float_to_int,
.int_to_float,
- .int_to_enum,
.int_to_ptr,
.std_mem_zeroes,
.std_math_Log2Int,
.log2_int_type,
.ptr_to_int,
- .enum_to_int,
.sizeof,
.alignof,
.typeof,
@@ -2340,7 +2245,6 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.array_cat,
.array_filler,
.@"if",
- .@"enum",
.@"struct",
.@"union",
.array_init,
@@ -2366,8 +2270,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.alias,
.var_simple,
.pub_var_simple,
- .pub_enum_redecl,
- .enum_redecl,
+ .enum_constant,
.@"while",
.@"switch",
.@"break",
diff --git a/src/type.zig b/src/type.zig
index 410773b649..853a4b7914 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -600,9 +600,11 @@ pub const Type = extern union {
pub const HashContext = struct {
pub fn hash(self: @This(), t: Type) u64 {
+ _ = self;
return t.hash();
}
pub fn eql(self: @This(), a: Type, b: Type) bool {
+ _ = self;
return a.eql(b);
}
};
@@ -777,6 +779,7 @@ pub const Type = extern union {
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
+ _ = options;
comptime assert(fmt.len == 0);
var ty = start_type;
while (true) {
@@ -1538,7 +1541,7 @@ pub const Type = extern union {
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> {
- if (self.elemType().hasCodeGenBits()) return 1;
+ if (!self.elemType().hasCodeGenBits()) return 1;
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
},
@@ -1550,7 +1553,7 @@ pub const Type = extern union {
.c_mut_pointer,
.pointer,
=> {
- if (self.elemType().hasCodeGenBits()) return 0;
+ if (!self.elemType().hasCodeGenBits()) return 0;
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
},
@@ -3013,7 +3016,7 @@ pub const Type = extern union {
.base = .{ .tag = t },
.data = data,
};
- return Type{ .ptr_otherwise = &ptr.base };
+ return file_struct.Type{ .ptr_otherwise = &ptr.base };
}
pub fn Data(comptime t: Tag) type {
@@ -3163,7 +3166,6 @@ pub const CType = enum {
longdouble,
pub fn sizeInBits(self: CType, target: Target) u16 {
- const arch = target.cpu.arch;
switch (target.os.tag) {
.freestanding, .other => switch (target.cpu.arch) {
.msp430 => switch (self) {
diff --git a/src/value.zig b/src/value.zig
index c358975667..008cc3c2fe 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -626,6 +626,7 @@ pub const Value = extern union {
return std.mem.dupe(allocator, u8, payload.data);
}
if (self.castTag(.repeated)) |payload| {
+ _ = payload;
@panic("TODO implement toAllocatedBytes for this Value tag");
}
if (self.castTag(.decl_ref)) |payload| {
@@ -747,6 +748,7 @@ pub const Value = extern union {
/// Asserts the type is an enum type.
pub fn toEnum(val: Value, enum_ty: Type, comptime E: type) E {
+ _ = enum_ty;
// TODO this needs to resolve other kinds of Value tags rather than
// assuming the tag will be .enum_field_index.
const field_index = val.castTag(.enum_field_index).?.data;
@@ -935,6 +937,7 @@ pub const Value = extern union {
/// Converts an integer or a float to a float.
/// Returns `error.Overflow` if the value does not fit in the new type.
pub fn floatCast(self: Value, allocator: *Allocator, ty: Type, target: Target) !Value {
+ _ = target;
switch (ty.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@@ -979,6 +982,26 @@ pub const Value = extern union {
};
}
+ /// Asserts the value is numeric
+ pub fn isZero(self: Value) bool {
+ return switch (self.tag()) {
+ .zero => true,
+ .one => false,
+
+ .int_u64 => self.castTag(.int_u64).?.data == 0,
+ .int_i64 => self.castTag(.int_i64).?.data == 0,
+
+ .float_16 => self.castTag(.float_16).?.data == 0,
+ .float_32 => self.castTag(.float_32).?.data == 0,
+ .float_64 => self.castTag(.float_64).?.data == 0,
+ .float_128 => self.castTag(.float_128).?.data == 0,
+
+ .int_big_positive => self.castTag(.int_big_positive).?.asBigInt().eqZero(),
+ .int_big_negative => self.castTag(.int_big_negative).?.asBigInt().eqZero(),
+ else => unreachable,
+ };
+ }
+
pub fn orderAgainstZero(lhs: Value) std.math.Order {
return switch (lhs.tag()) {
.zero,
@@ -1272,17 +1295,21 @@ pub const Value = extern union {
pub const ArrayHashContext = struct {
pub fn hash(self: @This(), v: Value) u32 {
+ _ = self;
return v.hash_u32();
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
+ _ = self;
return a.eql(b);
}
};
pub const HashContext = struct {
pub fn hash(self: @This(), v: Value) u64 {
+ _ = self;
return v.hash();
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
+ _ = self;
return a.eql(b);
}
};
@@ -1325,6 +1352,7 @@ pub const Value = extern union {
}
pub fn fieldValue(val: Value, allocator: *Allocator, index: usize) error{OutOfMemory}!Value {
+ _ = allocator;
switch (val.tag()) {
.@"struct" => {
const field_values = val.castTag(.@"struct").?.data;
diff --git a/src/zig_clang.cpp b/src/zig_clang.cpp
index 8a60a1210e..7c923021d8 100644
--- a/src/zig_clang.cpp
+++ b/src/zig_clang.cpp
@@ -3228,12 +3228,6 @@ bool ZigClangEnumDecl_enumerator_iterator_neq(
return casted_a != casted_b;
}
-const struct ZigClangExpr *ZigClangEnumConstantDecl_getInitExpr(const struct ZigClangEnumConstantDecl *self) {
- auto casted = reinterpret_cast<const clang::EnumConstantDecl *>(self);
- const clang::Expr *result = casted->getInitExpr();
- return reinterpret_cast<const ZigClangExpr *>(result);
-}
-
const struct ZigClangAPSInt *ZigClangEnumConstantDecl_getInitVal(const struct ZigClangEnumConstantDecl *self) {
auto casted = reinterpret_cast<const clang::EnumConstantDecl *>(self);
const llvm::APSInt *result = &casted->getInitVal();
diff --git a/src/zig_clang.h b/src/zig_clang.h
index 5ffba627df..634534dfb4 100644
--- a/src/zig_clang.h
+++ b/src/zig_clang.h
@@ -1326,6 +1326,5 @@ ZIG_EXTERN_C struct ZigClangSourceLocation ZigClangFieldDecl_getLocation(const s
ZIG_EXTERN_C const struct ZigClangRecordDecl *ZigClangFieldDecl_getParent(const struct ZigClangFieldDecl *);
ZIG_EXTERN_C unsigned ZigClangFieldDecl_getFieldIndex(const struct ZigClangFieldDecl *);
-ZIG_EXTERN_C const struct ZigClangExpr *ZigClangEnumConstantDecl_getInitExpr(const struct ZigClangEnumConstantDecl *);
ZIG_EXTERN_C const struct ZigClangAPSInt *ZigClangEnumConstantDecl_getInitVal(const struct ZigClangEnumConstantDecl *);
#endif
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index 722b77406e..8c54af4bb4 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -969,6 +969,12 @@ void ZigLLVMSetModulePIELevel(LLVMModuleRef module) {
unwrap(module)->setPIELevel(PIELevel::Level::Large);
}
+void ZigLLVMSetModuleCodeModel(LLVMModuleRef module, LLVMCodeModel code_model) {
+ bool JIT;
+ unwrap(module)->setCodeModel(*unwrap(code_model, JIT));
+ assert(!JIT);
+}
+
static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
switch (Ordering) {
case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic;
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 0d08980835..32a969f70e 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -208,6 +208,7 @@ ZIG_EXTERN_C void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMAddModuleCodeViewFlag(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMSetModulePICLevel(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMSetModulePIELevel(LLVMModuleRef module);
+ZIG_EXTERN_C void ZigLLVMSetModuleCodeModel(LLVMModuleRef module, LLVMCodeModel code_model);
ZIG_EXTERN_C void ZigLLVMSetCurrentDebugLocation(LLVMBuilderRef builder, int line, int column,
struct ZigLLVMDIScope *scope);