author     Jakub Konka <kubkon@jakubkonka.com>  2021-09-13 23:12:19 +0200
committer  Jakub Konka <kubkon@jakubkonka.com>  2021-09-13 23:40:38 +0200
commit     a38b636045c0384faad1565d47dfbf774821021e (patch)
tree       a78193473fd5b725feaf2762b10cfc189a26a325 /src
parent     760241ce50eaa9031339f6b591358b53f5797486 (diff)
parent     f011f13933b72f4d63a5f635c7646b68beee726e (diff)
Merge remote-tracking branch 'origin/master' into zld-incr
Diffstat (limited to 'src')
-rw-r--r--  src/Air.zig | 5
-rw-r--r--  src/AstGen.zig | 1011
-rw-r--r--  src/BuiltinFn.zig | 32
-rw-r--r--  src/Compilation.zig | 10
-rw-r--r--  src/Liveness.zig | 1
-rw-r--r--  src/Module.zig | 115
-rw-r--r--  src/Sema.zig | 289
-rw-r--r--  src/Zir.zig | 62
-rw-r--r--  src/clang_options_data.zig | 8
-rw-r--r--  src/codegen.zig | 101
-rw-r--r--  src/codegen/aarch64.zig | 2
-rw-r--r--  src/codegen/arm.zig | 125
-rw-r--r--  src/codegen/c.zig | 1
-rw-r--r--  src/codegen/llvm.zig | 169
-rw-r--r--  src/codegen/llvm/bindings.zig | 278
-rw-r--r--  src/codegen/riscv64.zig | 4
-rw-r--r--  src/codegen/spirv/spec.zig | 4
-rw-r--r--  src/codegen/x86.zig | 16
-rw-r--r--  src/codegen/x86_64.zig | 34
-rw-r--r--  src/libc_installation.zig | 32
-rw-r--r--  src/libunwind.zig | 2
-rw-r--r--  src/link.zig | 6
-rw-r--r--  src/link/Elf.zig | 172
-rw-r--r--  src/link/MachO.zig | 69
-rw-r--r--  src/link/MachO/Atom.zig | 9
-rw-r--r--  src/link/MachO/DebugSymbols.zig | 140
-rw-r--r--  src/link/MachO/Object.zig | 4
-rw-r--r--  src/link/tapi/parse/test.zig | 4
-rw-r--r--  src/main.zig | 131
-rw-r--r--  src/mingw.zig | 4
-rw-r--r--  src/print_air.zig | 1
-rw-r--r--  src/stage1/all_types.hpp | 9
-rw-r--r--  src/stage1/analyze.cpp | 13
-rw-r--r--  src/stage1/astgen.cpp | 122
-rw-r--r--  src/stage1/bigint.cpp | 78
-rw-r--r--  src/stage1/bigint.hpp | 4
-rw-r--r--  src/stage1/codegen.cpp | 44
-rw-r--r--  src/stage1/ir.cpp | 36
-rw-r--r--  src/stage1/ir_print.cpp | 8
-rw-r--r--  src/stage1/parser.cpp | 19
-rw-r--r--  src/stage1/parser.hpp | 1
-rw-r--r--  src/translate_c.zig | 123
-rw-r--r--  src/translate_c/ast.zig | 122
-rw-r--r--  src/type.zig | 11
-rw-r--r--  src/value.zig | 2
-rw-r--r--  src/windows_sdk.cpp | 20
-rw-r--r--  src/windows_sdk.zig | 21
-rw-r--r--  src/zig_llvm.cpp | 52
-rw-r--r--  src/zig_llvm.h | 9
49 files changed, 2280 insertions, 1255 deletions
diff --git a/src/Air.zig b/src/Air.zig
index 6e4125be44..60e5fdce1c 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -69,6 +69,10 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
div,
+ /// Integer or float remainder.
+ /// Both operands are guaranteed to be the same type, and the result type is the same as both operands.
+ /// Uses the `bin_op` field.
+ rem,
/// Add an offset to a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
@@ -462,6 +466,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.mul,
.mulwrap,
.div,
+ .rem,
.bit_and,
.bit_or,
.xor,
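
For orientation only (not part of this commit, and the exact lowering path is an assumption): the new `rem` instruction follows the same contract as the other arithmetic tags above, so a minimal Zig sketch of the kind of expression it represents looks like this.

    // Illustrative sketch, not from this diff. A hypothetical helper whose
    // remainder expression matches the new `rem` doc comment: both operands
    // share one type (i32) and the result keeps that type.
    fn remainder(a: i32, b: i32) i32 {
        return @rem(a, b); // e.g. @rem(7, 3) == 1
    }
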
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 2b2bbd4f22..98937c7923 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2,18 +2,20 @@
const AstGen = @This();
const std = @import("std");
-const ast = std.zig.ast;
+const Ast = std.zig.Ast;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
+const StringIndexAdapter = std.hash_map.StringIndexAdapter;
+const StringIndexContext = std.hash_map.StringIndexContext;
const Zir = @import("Zir.zig");
const trace = @import("tracy.zig").trace;
const BuiltinFn = @import("BuiltinFn.zig");
gpa: *Allocator,
-tree: *const ast.Tree,
+tree: *const Ast,
instructions: std.MultiArrayList(Zir.Inst) = .{},
extra: ArrayListUnmanaged(u32) = .{},
string_bytes: ArrayListUnmanaged(u8) = .{},
@@ -30,13 +32,13 @@ source_column: u32 = 0,
/// Used for temporary allocations; freed after AstGen is complete.
/// The resulting ZIR code has no references to anything in this arena.
arena: *Allocator,
-string_table: std.StringHashMapUnmanaged(u32) = .{},
+string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
/// Maps string table indexes to the first `@import` ZIR instruction
/// that uses this string as the operand.
-imports: std.AutoArrayHashMapUnmanaged(u32, ast.TokenIndex) = .{},
+imports: std.AutoArrayHashMapUnmanaged(u32, Ast.TokenIndex) = .{},
const InnerError = error{ OutOfMemory, AnalysisFail };
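
For orientation (not part of this commit): the reworked `string_table` keys are u32 offsets into `string_bytes` rather than owned slices, with `StringIndexContext` hashing and comparing by reading the bytes stored at that offset. A hand-rolled sketch of the same interning idea, using hypothetical names and a linear scan in place of the hash map:

    const std = @import("std");

    // Simplified illustration only; not this repository's implementation.
    // Each string is stored once, null-terminated, and referred to by the
    // u32 offset where its bytes begin.
    const TinyInterner = struct {
        bytes: std.ArrayListUnmanaged(u8) = .{},
        offsets: std.ArrayListUnmanaged(u32) = .{},

        fn get(self: TinyInterner, index: u32) []const u8 {
            const tail = self.bytes.items[index..];
            return tail[0..std.mem.indexOfScalar(u8, tail, 0).?];
        }

        fn intern(self: *TinyInterner, gpa: *std.mem.Allocator, s: []const u8) !u32 {
            for (self.offsets.items) |off| {
                if (std.mem.eql(u8, self.get(off), s)) return off; // already interned
            }
            const off = @intCast(u32, self.bytes.items.len);
            try self.bytes.appendSlice(gpa, s);
            try self.bytes.append(gpa, 0); // null terminator, like string_bytes
            try self.offsets.append(gpa, off);
            return off;
        }
    };
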
@@ -70,7 +72,7 @@ fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
astgen.extra.appendSliceAssumeCapacity(coerced);
}
-pub fn generate(gpa: *Allocator, tree: ast.Tree) Allocator.Error!Zir {
+pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
@@ -106,7 +108,7 @@ pub fn generate(gpa: *Allocator, tree: ast.Tree) Allocator.Error!Zir {
};
defer gen_scope.instructions.deinit(gpa);
- const container_decl: ast.full.ContainerDecl = .{
+ const container_decl: Ast.full.ContainerDecl = .{
.layout_token = null,
.ast = .{
.main_token = undefined,
@@ -265,7 +267,7 @@ pub const bool_rl: ResultLoc = .{ .ty = .bool_type };
pub const type_rl: ResultLoc = .{ .ty = .type_type };
pub const coerced_type_rl: ResultLoc = .{ .coerced_ty = .type_type };
-fn typeExpr(gz: *GenZir, scope: *Scope, type_node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const prev_force_comptime = gz.force_comptime;
gz.force_comptime = true;
defer gz.force_comptime = prev_force_comptime;
@@ -278,8 +280,8 @@ fn reachableExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- src_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ src_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const result_inst = try expr(gz, scope, rl, node);
if (gz.refIsNoReturn(result_inst)) {
@@ -290,7 +292,7 @@ fn reachableExpr(
return result_inst;
}
-fn lvalExpr(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_tags = tree.nodes.items(.tag);
@@ -370,10 +372,6 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Ins
.bool_not,
.address_of,
.float_literal,
- .undefined_literal,
- .true_literal,
- .false_literal,
- .null_literal,
.optional_type,
.block,
.block_semicolon,
@@ -485,7 +483,7 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Ins
/// When `rl` is discard, ptr, inferred_ptr, or inferred_ptr, the
/// result instruction can be used to inspect whether it is isNoReturn() but that is it,
/// it must otherwise not be used.
-fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -644,13 +642,13 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.builtin_call_two, .builtin_call_two_comma => {
if (node_datas[node].lhs == 0) {
- const params = [_]ast.Node.Index{};
+ const params = [_]Ast.Node.Index{};
return builtinCall(gz, scope, rl, node, &params);
} else if (node_datas[node].rhs == 0) {
- const params = [_]ast.Node.Index{node_datas[node].lhs};
+ const params = [_]Ast.Node.Index{node_datas[node].lhs};
return builtinCall(gz, scope, rl, node, &params);
} else {
- const params = [_]ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
+ const params = [_]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
return builtinCall(gz, scope, rl, node, &params);
}
},
@@ -660,7 +658,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
},
.call_one, .call_one_comma, .async_call_one, .async_call_one_comma => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
return callExpr(gz, scope, rl, node, tree.callOne(&params, node));
},
.call, .call_comma, .async_call, .async_call_comma => {
@@ -698,11 +696,17 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.lhs = lhs,
.start = start,
});
- return rvalue(gz, rl, result, node);
+ switch (rl) {
+ .ref, .none_or_ref => return result,
+ else => {
+ const dereffed = try gz.addUnNode(.load, result, node);
+ return rvalue(gz, rl, dereffed, node);
+ },
+ }
},
.slice => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
- const extra = tree.extraData(node_datas[node].rhs, ast.Node.Slice);
+ const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice);
const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
const end = try expr(gz, scope, .{ .ty = .usize_type }, extra.end);
const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{
@@ -710,11 +714,17 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.start = start,
.end = end,
});
- return rvalue(gz, rl, result, node);
+ switch (rl) {
+ .ref, .none_or_ref => return result,
+ else => {
+ const dereffed = try gz.addUnNode(.load, result, node);
+ return rvalue(gz, rl, dereffed, node);
+ },
+ }
},
.slice_sentinel => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
- const extra = tree.extraData(node_datas[node].rhs, ast.Node.SliceSentinel);
+ const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel);
const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
const end = if (extra.end != 0) try expr(gz, scope, .{ .ty = .usize_type }, extra.end) else .none;
const sentinel = try expr(gz, scope, .{ .ty = .usize_type }, extra.sentinel);
@@ -724,7 +734,13 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.end = end,
.sentinel = sentinel,
});
- return rvalue(gz, rl, result, node);
+ switch (rl) {
+ .ref, .none_or_ref => return result,
+ else => {
+ const dereffed = try gz.addUnNode(.load, result, node);
+ return rvalue(gz, rl, dereffed, node);
+ },
+ }
},
.deref => {
@@ -741,10 +757,6 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
const result = try expr(gz, scope, .ref, node_datas[node].lhs);
return rvalue(gz, rl, result, node);
},
- .undefined_literal => return rvalue(gz, rl, .undef, node),
- .true_literal => return rvalue(gz, rl, .bool_true, node),
- .false_literal => return rvalue(gz, rl, .bool_false, node),
- .null_literal => return rvalue(gz, rl, .null_value, node),
.optional_type => {
const operand = try typeExpr(gz, scope, node_datas[node].lhs);
const result = try gz.addUnNode(.optional_type, operand, node);
@@ -763,7 +775,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
), node),
},
.block_two, .block_two_semicolon => {
- const statements = [2]ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
+ const statements = [2]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
if (node_datas[node].lhs == 0) {
return blockExpr(gz, scope, rl, node, statements[0..0]);
} else if (node_datas[node].rhs == 0) {
@@ -786,7 +798,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
},
.@"catch" => {
const catch_token = main_tokens[node];
- const payload_token: ?ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe)
+ const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe)
catch_token + 2
else
null;
@@ -853,7 +865,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.container_decl_trailing,
=> return containerDecl(gz, scope, rl, node, tree.containerDecl(node)),
.container_decl_two, .container_decl_two_trailing => {
- var buffer: [2]ast.Node.Index = undefined;
+ var buffer: [2]Ast.Node.Index = undefined;
return containerDecl(gz, scope, rl, node, tree.containerDeclTwo(&buffer, node));
},
.container_decl_arg,
@@ -864,7 +876,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.tagged_union_trailing,
=> return containerDecl(gz, scope, rl, node, tree.taggedUnion(node)),
.tagged_union_two, .tagged_union_two_trailing => {
- var buffer: [2]ast.Node.Index = undefined;
+ var buffer: [2]Ast.Node.Index = undefined;
return containerDecl(gz, scope, rl, node, tree.taggedUnionTwo(&buffer, node));
},
.tagged_union_enum_tag,
@@ -890,11 +902,11 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
.@"try" => return tryExpr(gz, scope, rl, node, node_datas[node].lhs),
.array_init_one, .array_init_one_comma => {
- var elements: [1]ast.Node.Index = undefined;
+ var elements: [1]Ast.Node.Index = undefined;
return arrayInitExpr(gz, scope, rl, node, tree.arrayInitOne(&elements, node));
},
.array_init_dot_two, .array_init_dot_two_comma => {
- var elements: [2]ast.Node.Index = undefined;
+ var elements: [2]Ast.Node.Index = undefined;
return arrayInitExpr(gz, scope, rl, node, tree.arrayInitDotTwo(&elements, node));
},
.array_init_dot,
@@ -905,11 +917,11 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
=> return arrayInitExpr(gz, scope, rl, node, tree.arrayInit(node)),
.struct_init_one, .struct_init_one_comma => {
- var fields: [1]ast.Node.Index = undefined;
+ var fields: [1]Ast.Node.Index = undefined;
return structInitExpr(gz, scope, rl, node, tree.structInitOne(&fields, node));
},
.struct_init_dot_two, .struct_init_dot_two_comma => {
- var fields: [2]ast.Node.Index = undefined;
+ var fields: [2]Ast.Node.Index = undefined;
return structInitExpr(gz, scope, rl, node, tree.structInitDotTwo(&fields, node));
},
.struct_init_dot,
@@ -920,14 +932,14 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr
=> return structInitExpr(gz, scope, rl, node, tree.structInit(node)),
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
return fnProtoExpr(gz, scope, rl, tree.fnProtoSimple(&params, node));
},
.fn_proto_multi => {
return fnProtoExpr(gz, scope, rl, tree.fnProtoMulti(node));
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
return fnProtoExpr(gz, scope, rl, tree.fnProtoOne(&params, node));
},
.fn_proto => {
@@ -940,7 +952,7 @@ fn nosuspendExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -961,7 +973,7 @@ fn nosuspendExpr(
fn suspendExpr(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@@ -1001,7 +1013,7 @@ fn awaitExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -1023,7 +1035,7 @@ fn resumeExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -1038,7 +1050,7 @@ fn fnProtoExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- fn_proto: ast.full.FnProto,
+ fn_proto: Ast.full.FnProto,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@@ -1149,8 +1161,8 @@ fn arrayInitExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- array_init: ast.full.ArrayInit,
+ node: Ast.Node.Index,
+ array_init: Ast.full.ArrayInit,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -1169,7 +1181,7 @@ fn arrayInitExpr(
};
infer: {
- const array_type: ast.full.ArrayType = switch (node_tags[array_init.ast.type_expr]) {
+ const array_type: Ast.full.ArrayType = switch (node_tags[array_init.ast.type_expr]) {
.array_type => tree.arrayType(array_init.ast.type_expr),
.array_type_sentinel => tree.arrayTypeSentinel(array_init.ast.type_expr),
else => break :infer,
@@ -1246,8 +1258,8 @@ fn arrayInitExpr(
fn arrayInitExprRlNone(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- elements: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ elements: []const Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -1268,8 +1280,8 @@ fn arrayInitExprRlNone(
fn arrayInitExprRlTy(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- elements: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ elements: []const Ast.Node.Index,
elem_ty_inst: Zir.Inst.Ref,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
@@ -1294,8 +1306,8 @@ fn arrayInitExprRlTy(
fn arrayInitExprRlPtr(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- elements: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ elements: []const Ast.Node.Index,
result_ptr: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -1324,8 +1336,8 @@ fn structInitExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- struct_init: ast.full.StructInit,
+ node: Ast.Node.Index,
+ struct_init: Ast.full.StructInit,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -1337,7 +1349,7 @@ fn structInitExpr(
} else array: {
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
- const array_type: ast.full.ArrayType = switch (node_tags[struct_init.ast.type_expr]) {
+ const array_type: Ast.full.ArrayType = switch (node_tags[struct_init.ast.type_expr]) {
.array_type => tree.arrayType(struct_init.ast.type_expr),
.array_type_sentinel => tree.arrayTypeSentinel(struct_init.ast.type_expr),
else => break :array,
@@ -1410,8 +1422,8 @@ fn structInitExpr(
fn structInitExprRlNone(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- struct_init: ast.full.StructInit,
+ node: Ast.Node.Index,
+ struct_init: Ast.full.StructInit,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -1444,8 +1456,8 @@ fn structInitExprRlNone(
fn structInitExprRlPtr(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- struct_init: ast.full.StructInit,
+ node: Ast.Node.Index,
+ struct_init: Ast.full.StructInit,
result_ptr: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -1478,8 +1490,8 @@ fn structInitExprRlPtr(
fn structInitExprRlTy(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- struct_init: ast.full.StructInit,
+ node: Ast.Node.Index,
+ struct_init: Ast.full.StructInit,
ty_inst: Zir.Inst.Ref,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
@@ -1520,7 +1532,7 @@ fn comptimeExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const prev_force_comptime = gz.force_comptime;
gz.force_comptime = true;
@@ -1536,7 +1548,7 @@ fn comptimeExprAst(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
if (gz.force_comptime) {
@@ -1551,7 +1563,7 @@ fn comptimeExprAst(
return result;
}
-fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -1626,7 +1638,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: ast.Node.Index) Inn
}
}
-fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -1684,8 +1696,8 @@ fn blockExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- block_node: ast.Node.Index,
- statements: []const ast.Node.Index,
+ block_node: Ast.Node.Index,
+ statements: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -1706,7 +1718,7 @@ fn blockExpr(
return rvalue(gz, rl, .void_value, block_node);
}
-fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: ast.TokenIndex) !void {
+fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void {
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
@@ -1742,8 +1754,8 @@ fn labeledBlockExpr(
gz: *GenZir,
parent_scope: *Scope,
rl: ResultLoc,
- block_node: ast.Node.Index,
- statements: []const ast.Node.Index,
+ block_node: Ast.Node.Index,
+ statements: []const Ast.Node.Index,
zir_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const tracy = trace(@src());
@@ -1819,7 +1831,7 @@ fn labeledBlockExpr(
}
}
-fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const ast.Node.Index) !void {
+fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Node.Index) !void {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_tags = tree.nodes.items(.tag);
@@ -1827,7 +1839,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const ast.Nod
var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa);
defer block_arena.deinit();
- var noreturn_src_node: ast.Node.Index = 0;
+ var noreturn_src_node: Ast.Node.Index = 0;
var scope = parent_scope;
for (statements) |statement| {
if (noreturn_src_node != 0) {
@@ -1882,12 +1894,12 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const ast.Nod
/// Returns AST source node of the thing that is noreturn if the statement is definitely `noreturn`.
/// Otherwise returns 0.
-fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) InnerError!ast.Node.Index {
+fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) InnerError!Ast.Node.Index {
try emitDbgNode(gz, statement);
// We need to emit an error if the result is not `noreturn` or `void`, but
// we want to avoid adding the ZIR instruction if possible for performance.
const maybe_unused_result = try expr(gz, scope, .none, statement);
- var noreturn_src_node: ast.Node.Index = 0;
+ var noreturn_src_node: Ast.Node.Index = 0;
const elide_check = if (refToIndex(maybe_unused_result)) |inst| b: {
// Note that this array becomes invalid after appending more items to it
// in the above while loop.
@@ -2334,7 +2346,7 @@ fn checkUsed(
fn makeDeferScope(
scope: *Scope,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
block_arena: *Allocator,
scope_tag: Scope.Tag,
) InnerError!*Scope {
@@ -2350,9 +2362,9 @@ fn makeDeferScope(
fn varDecl(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
block_arena: *Allocator,
- var_decl: ast.full.VarDecl,
+ var_decl: Ast.full.VarDecl,
) InnerError!*Scope {
try emitDbgNode(gz, node);
const astgen = gz.astgen;
@@ -2367,7 +2379,7 @@ fn varDecl(
}
const ident_name = try astgen.identAsString(name_token);
- try astgen.detectLocalShadowing(scope, ident_name, name_token);
+ try astgen.detectLocalShadowing(scope, ident_name, name_token, ident_name_raw);
if (var_decl.ast.init_node == 0) {
return astgen.failNode(node, "variables must be initialized", .{});
@@ -2564,7 +2576,7 @@ fn varDecl(
}
}
-fn emitDbgNode(gz: *GenZir, node: ast.Node.Index) !void {
+fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void {
// The instruction emitted here is for debugging runtime code.
// If the current block will be evaluated only during semantic analysis
// then no dbg_stmt ZIR instruction is needed.
@@ -2588,7 +2600,7 @@ fn emitDbgNode(gz: *GenZir, node: ast.Node.Index) !void {
} });
}
-fn assign(gz: *GenZir, scope: *Scope, infix_node: ast.Node.Index) InnerError!void {
+fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void {
try emitDbgNode(gz, infix_node);
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -2613,7 +2625,7 @@ fn assign(gz: *GenZir, scope: *Scope, infix_node: ast.Node.Index) InnerError!voi
fn assignOp(
gz: *GenZir,
scope: *Scope,
- infix_node: ast.Node.Index,
+ infix_node: Ast.Node.Index,
op_inst_tag: Zir.Inst.Tag,
) InnerError!void {
try emitDbgNode(gz, infix_node);
@@ -2636,7 +2648,7 @@ fn assignOp(
fn assignShift(
gz: *GenZir,
scope: *Scope,
- infix_node: ast.Node.Index,
+ infix_node: Ast.Node.Index,
op_inst_tag: Zir.Inst.Tag,
) InnerError!void {
try emitDbgNode(gz, infix_node);
@@ -2656,7 +2668,7 @@ fn assignShift(
_ = try gz.addBin(.store, lhs_ptr, result);
}
-fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -2666,7 +2678,7 @@ fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) Inne
return rvalue(gz, rl, result, node);
}
-fn bitNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn bitNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -2680,7 +2692,7 @@ fn negation(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -2696,8 +2708,8 @@ fn ptrType(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- ptr_info: ast.full.PtrType,
+ node: Ast.Node.Index,
+ ptr_info: Ast.full.PtrType,
) InnerError!Zir.Inst.Ref {
const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type);
@@ -2778,7 +2790,7 @@ fn ptrType(
return rvalue(gz, rl, result, node);
}
-fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
+fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -2798,13 +2810,13 @@ fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Z
return rvalue(gz, rl, result, node);
}
-fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
+fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
- const extra = tree.extraData(node_datas[node].rhs, ast.Node.ArrayTypeSentinel);
+ const extra = tree.extraData(node_datas[node].rhs, Ast.Node.ArrayTypeSentinel);
const len_node = node_datas[node].lhs;
if (node_tags[len_node] == .identifier and
@@ -2860,21 +2872,18 @@ fn fnDecl(
gz: *GenZir,
scope: *Scope,
wip_decls: *WipDecls,
- decl_node: ast.Node.Index,
- body_node: ast.Node.Index,
- fn_proto: ast.full.FnProto,
+ decl_node: Ast.Node.Index,
+ body_node: Ast.Node.Index,
+ fn_proto: Ast.full.FnProto,
) InnerError!void {
const gpa = astgen.gpa;
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
- const fn_name_token = fn_proto.name_token orelse {
- return astgen.failTok(fn_proto.ast.fn_token, "missing function name", .{});
- };
+ // missing function name already happened in scanDecls()
+ const fn_name_token = fn_proto.name_token orelse return error.AnalysisFail;
const fn_name_str_index = try astgen.identAsString(fn_name_token);
- try astgen.declareNewName(scope, fn_name_str_index, decl_node);
-
// We insert this at the beginning so that its instruction index marks the
// start of the top level declaration.
const block_inst = try gz.addBlock(.block_inline, fn_proto.ast.proto_node);
@@ -2934,12 +2943,13 @@ fn fnDecl(
} else false;
const param_name: u32 = if (param.name_token) |name_token| blk: {
- if (mem.eql(u8, "_", tree.tokenSlice(name_token)))
+ const name_bytes = tree.tokenSlice(name_token);
+ if (mem.eql(u8, "_", name_bytes))
break :blk 0;
const param_name = try astgen.identAsString(name_token);
if (!is_extern) {
- try astgen.detectLocalShadowing(params_scope, param_name, name_token);
+ try astgen.detectLocalShadowing(params_scope, param_name, name_token, name_bytes);
}
break :blk param_name;
} else if (!is_extern) {
@@ -3127,8 +3137,8 @@ fn globalVarDecl(
gz: *GenZir,
scope: *Scope,
wip_decls: *WipDecls,
- node: ast.Node.Index,
- var_decl: ast.full.VarDecl,
+ node: Ast.Node.Index,
+ var_decl: Ast.full.VarDecl,
) InnerError!void {
const gpa = astgen.gpa;
const tree = astgen.tree;
@@ -3142,8 +3152,6 @@ fn globalVarDecl(
const name_token = var_decl.ast.mut_token + 1;
const name_str_index = try astgen.identAsString(name_token);
- try astgen.declareNewName(scope, name_str_index, node);
-
var block_scope: GenZir = .{
.parent = scope,
.decl_node_index = node,
@@ -3273,7 +3281,7 @@ fn comptimeDecl(
gz: *GenZir,
scope: *Scope,
wip_decls: *WipDecls,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!void {
const gpa = astgen.gpa;
const tree = astgen.tree;
@@ -3320,7 +3328,7 @@ fn usingnamespaceDecl(
gz: *GenZir,
scope: *Scope,
wip_decls: *WipDecls,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!void {
const gpa = astgen.gpa;
const tree = astgen.tree;
@@ -3371,7 +3379,7 @@ fn testDecl(
gz: *GenZir,
scope: *Scope,
wip_decls: *WipDecls,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!void {
const gpa = astgen.gpa;
const tree = astgen.tree;
@@ -3462,8 +3470,8 @@ fn testDecl(
fn structDeclInner(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- container_decl: ast.full.ContainerDecl,
+ node: Ast.Node.Index,
+ container_decl: Ast.full.ContainerDecl,
layout: std.builtin.TypeInfo.ContainerLayout,
) InnerError!Zir.Inst.Ref {
if (container_decl.ast.members.len == 0) {
@@ -3498,9 +3506,11 @@ fn structDeclInner(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = scope };
+ var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
defer namespace.decls.deinit(gpa);
+ try astgen.scanDecls(&namespace, container_decl.ast.members);
+
var wip_decls: WipDecls = .{};
defer wip_decls.deinit(gpa);
@@ -3529,7 +3539,7 @@ fn structDeclInner(
const body = node_datas[member_node].rhs;
switch (node_tags[fn_proto]) {
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3544,7 +3554,7 @@ fn structDeclInner(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3562,7 +3572,7 @@ fn structDeclInner(
}
},
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3577,7 +3587,7 @@ fn structDeclInner(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3660,7 +3670,7 @@ fn structDeclInner(
const field_type: Zir.Inst.Ref = if (node_tags[member.ast.type_expr] == .@"anytype")
.none
else
- try typeExpr(&block_scope, &block_scope.base, member.ast.type_expr);
+ try typeExpr(&block_scope, &namespace.base, member.ast.type_expr);
fields_data.appendAssumeCapacity(@enumToInt(field_type));
known_has_bits = known_has_bits or nodeImpliesRuntimeBits(tree, member.ast.type_expr);
@@ -3676,13 +3686,13 @@ fn structDeclInner(
(@as(u32, @boolToInt(unused)) << 31);
if (have_align) {
- const align_inst = try expr(&block_scope, &block_scope.base, align_rl, member.ast.align_expr);
+ const align_inst = try expr(&block_scope, &namespace.base, align_rl, member.ast.align_expr);
fields_data.appendAssumeCapacity(@enumToInt(align_inst));
}
if (have_value) {
const rl: ResultLoc = if (field_type == .none) .none else .{ .ty = field_type };
- const default_inst = try expr(&block_scope, &block_scope.base, rl, member.ast.value_expr);
+ const default_inst = try expr(&block_scope, &namespace.base, rl, member.ast.value_expr);
fields_data.appendAssumeCapacity(@enumToInt(default_inst));
} else if (member.comptime_token) |comptime_token| {
return astgen.failTok(comptime_token, "comptime field without default initialization value", .{});
@@ -3742,10 +3752,10 @@ fn structDeclInner(
fn unionDeclInner(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
- members: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ members: []const Ast.Node.Index,
layout: std.builtin.TypeInfo.ContainerLayout,
- arg_inst: Zir.Inst.Ref,
+ arg_node: Ast.Node.Index,
have_auto_enum: bool,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -3767,9 +3777,16 @@ fn unionDeclInner(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = scope };
+ var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
defer namespace.decls.deinit(gpa);
+ try astgen.scanDecls(&namespace, members);
+
+ const arg_inst: Zir.Inst.Ref = if (arg_node != 0)
+ try typeExpr(gz, &namespace.base, arg_node)
+ else
+ .none;
+
var wip_decls: WipDecls = .{};
defer wip_decls.deinit(gpa);
@@ -3797,7 +3814,7 @@ fn unionDeclInner(
const body = node_datas[member_node].rhs;
switch (node_tags[fn_proto]) {
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3812,7 +3829,7 @@ fn unionDeclInner(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3830,7 +3847,7 @@ fn unionDeclInner(
}
},
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3845,7 +3862,7 @@ fn unionDeclInner(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -3935,7 +3952,7 @@ fn unionDeclInner(
(@as(u32, @boolToInt(unused)) << 31);
if (have_type and node_tags[member.ast.type_expr] != .@"anytype") {
- const field_type = try typeExpr(&block_scope, &block_scope.base, member.ast.type_expr);
+ const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr);
fields_data.appendAssumeCapacity(@enumToInt(field_type));
}
if (have_align) {
@@ -4018,8 +4035,8 @@ fn containerDecl(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- container_decl: ast.full.ContainerDecl,
+ node: Ast.Node.Index,
+ container_decl: Ast.full.ContainerDecl,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@@ -4035,11 +4052,6 @@ fn containerDecl(
// We must not create any types until Sema. Here the goal is only to generate
// ZIR for all the field types, alignments, and default value expressions.
- const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0)
- try comptimeExpr(gz, scope, .{ .ty = .type_type }, container_decl.ast.arg)
- else
- .none;
-
switch (token_tags[container_decl.ast.main_token]) {
.keyword_struct => {
const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) {
@@ -4048,7 +4060,7 @@ fn containerDecl(
else => unreachable,
} else std.builtin.TypeInfo.ContainerLayout.Auto;
- assert(arg_inst == .none);
+ assert(container_decl.ast.arg == 0);
const result = try structDeclInner(gz, scope, node, container_decl, layout);
return rvalue(gz, rl, result, node);
@@ -4062,7 +4074,7 @@ fn containerDecl(
const have_auto_enum = container_decl.ast.enum_token != null;
- const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, arg_inst, have_auto_enum);
+ const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, have_auto_enum);
return rvalue(gz, rl, result, node);
},
.keyword_enum => {
@@ -4074,7 +4086,7 @@ fn containerDecl(
var values: usize = 0;
var total_fields: usize = 0;
var decls: usize = 0;
- var nonexhaustive_node: ast.Node.Index = 0;
+ var nonexhaustive_node: Ast.Node.Index = 0;
for (container_decl.ast.members) |member_node| {
const member = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
@@ -4129,7 +4141,7 @@ fn containerDecl(
}
total_fields += 1;
if (member.ast.value_expr != 0) {
- if (arg_inst == .none) {
+ if (container_decl.ast.arg == 0) {
return astgen.failNode(member.ast.value_expr, "value assigned to enum tag with inferred tag type", .{});
}
values += 1;
@@ -4148,7 +4160,7 @@ fn containerDecl(
// must be at least one tag.
return astgen.failNode(node, "enum declarations must have at least one tag", .{});
}
- if (counts.nonexhaustive_node != 0 and arg_inst == .none) {
+ if (counts.nonexhaustive_node != 0 and container_decl.ast.arg == 0) {
return astgen.failNodeNotes(
node,
"non-exhaustive enum missing integer tag type",
@@ -4178,9 +4190,16 @@ fn containerDecl(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = scope };
+ var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
defer namespace.decls.deinit(gpa);
+ try astgen.scanDecls(&namespace, container_decl.ast.members);
+
+ const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0)
+ try comptimeExpr(gz, &namespace.base, .{ .ty = .type_type }, container_decl.ast.arg)
+ else
+ .none;
+
var wip_decls: WipDecls = .{};
defer wip_decls.deinit(gpa);
@@ -4208,7 +4227,7 @@ fn containerDecl(
const body = node_datas[member_node].rhs;
switch (node_tags[fn_proto]) {
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4223,7 +4242,7 @@ fn containerDecl(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4241,7 +4260,7 @@ fn containerDecl(
}
},
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4256,7 +4275,7 @@ fn containerDecl(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4353,7 +4372,7 @@ fn containerDecl(
},
);
}
- const tag_value_inst = try expr(&block_scope, &block_scope.base, .{ .ty = arg_inst }, member.ast.value_expr);
+ const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .ty = arg_inst }, member.ast.value_expr);
fields_data.appendAssumeCapacity(@enumToInt(tag_value_inst));
}
@@ -4405,9 +4424,13 @@ fn containerDecl(
return rvalue(gz, rl, indexToRef(decl_inst), node);
},
.keyword_opaque => {
- var namespace: Scope.Namespace = .{ .parent = scope };
+ assert(container_decl.ast.arg == 0);
+
+ var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
defer namespace.decls.deinit(gpa);
+ try astgen.scanDecls(&namespace, container_decl.ast.members);
+
var wip_decls: WipDecls = .{};
defer wip_decls.deinit(gpa);
@@ -4420,7 +4443,7 @@ fn containerDecl(
const body = node_datas[member_node].rhs;
switch (node_tags[fn_proto]) {
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4435,7 +4458,7 @@ fn containerDecl(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4453,7 +4476,7 @@ fn containerDecl(
}
},
.fn_proto_simple => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4468,7 +4491,7 @@ fn containerDecl(
continue;
},
.fn_proto_one => {
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
@@ -4569,7 +4592,7 @@ fn containerDecl(
}
}
-fn errorSetDecl(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn errorSetDecl(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
@@ -4608,8 +4631,8 @@ fn tryExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- operand_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ operand_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
@@ -4684,13 +4707,13 @@ fn orelseCatchExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs: Ast.Node.Index,
cond_op: Zir.Inst.Tag,
unwrap_op: Zir.Inst.Tag,
unwrap_code_op: Zir.Inst.Tag,
- rhs: ast.Node.Index,
- payload_token: ?ast.TokenIndex,
+ rhs: Ast.Node.Index,
+ payload_token: ?Ast.TokenIndex,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const tree = astgen.tree;
@@ -4775,7 +4798,7 @@ fn orelseCatchExpr(
fn finishThenElseBlock(
parent_gz: *GenZir,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
block_scope: *GenZir,
then_scope: *GenZir,
else_scope: *GenZir,
@@ -4831,7 +4854,7 @@ fn finishThenElseBlock(
/// tokens without allocating.
/// OK in theory it could do it without allocating. This implementation
/// allocates when the @"" form is used.
-fn tokenIdentEql(astgen: *AstGen, token1: ast.TokenIndex, token2: ast.TokenIndex) !bool {
+fn tokenIdentEql(astgen: *AstGen, token1: Ast.TokenIndex, token2: Ast.TokenIndex) !bool {
const ident_name_1 = try astgen.identifierTokenString(token1);
const ident_name_2 = try astgen.identifierTokenString(token2);
return mem.eql(u8, ident_name_1, ident_name_2);
@@ -4841,7 +4864,7 @@ fn fieldAccess(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -4868,7 +4891,7 @@ fn arrayAccess(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -4891,7 +4914,7 @@ fn simpleBinOp(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
op_inst_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -4908,8 +4931,8 @@ fn simpleBinOp(
fn simpleStrTok(
gz: *GenZir,
rl: ResultLoc,
- ident_token: ast.TokenIndex,
- node: ast.Node.Index,
+ ident_token: Ast.TokenIndex,
+ node: Ast.Node.Index,
op_inst_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -4922,7 +4945,7 @@ fn boolBinOp(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
zir_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -4948,8 +4971,8 @@ fn ifExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- if_full: ast.full.If,
+ node: Ast.Node.Index,
+ if_full: Ast.full.If,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const tree = astgen.tree;
@@ -5017,7 +5040,7 @@ fn ifExpr(
const token_name_str = tree.tokenSlice(token_name_index);
if (mem.eql(u8, "_", token_name_str))
break :s &then_scope.base;
- try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index);
+ try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index, token_name_str);
payload_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
@@ -5036,11 +5059,12 @@ fn ifExpr(
.optional_payload_unsafe_ptr
else
.optional_payload_unsafe;
- if (mem.eql(u8, "_", tree.tokenSlice(ident_token)))
+ const ident_bytes = tree.tokenSlice(ident_token);
+ if (mem.eql(u8, "_", ident_bytes))
break :s &then_scope.base;
const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(ident_token);
- try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token);
+ try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes);
payload_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
@@ -5067,7 +5091,7 @@ fn ifExpr(
const else_node = if_full.ast.else_expr;
const else_info: struct {
- src: ast.Node.Index,
+ src: Ast.Node.Index,
result: Zir.Inst.Ref,
} = if (else_node != 0) blk: {
block_scope.break_count += 1;
@@ -5082,7 +5106,7 @@ fn ifExpr(
const error_token_str = tree.tokenSlice(error_token);
if (mem.eql(u8, "_", error_token_str))
break :s &else_scope.base;
- try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token);
+ try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, error_token_str);
payload_val_scope = .{
.parent = &else_scope.base,
.gen_zir = &else_scope,
@@ -5193,8 +5217,8 @@ fn whileExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- while_full: ast.full.While,
+ node: Ast.Node.Index,
+ while_full: Ast.full.While,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const tree = astgen.tree;
@@ -5273,11 +5297,12 @@ fn whileExpr(
.err_union_payload_unsafe;
const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
const ident_token = if (payload_is_ref) payload_token + 1 else payload_token;
- if (mem.eql(u8, "_", tree.tokenSlice(ident_token)))
+ const ident_bytes = tree.tokenSlice(ident_token);
+ if (mem.eql(u8, "_", ident_bytes))
break :s &then_scope.base;
const payload_name_loc = payload_token + @boolToInt(payload_is_ref);
const ident_name = try astgen.identAsString(payload_name_loc);
- try astgen.detectLocalShadowing(&then_scope.base, ident_name, payload_name_loc);
+ try astgen.detectLocalShadowing(&then_scope.base, ident_name, payload_name_loc, ident_bytes);
payload_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
@@ -5298,9 +5323,10 @@ fn whileExpr(
.optional_payload_unsafe;
const payload_inst = try then_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(ident_token);
- if (mem.eql(u8, "_", tree.tokenSlice(ident_token)))
+ const ident_bytes = tree.tokenSlice(ident_token);
+ if (mem.eql(u8, "_", ident_bytes))
break :s &then_scope.base;
- try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token);
+ try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes);
payload_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
@@ -5344,7 +5370,7 @@ fn whileExpr(
const else_node = while_full.ast.else_expr;
const else_info: struct {
- src: ast.Node.Index,
+ src: Ast.Node.Index,
result: Zir.Inst.Ref,
} = if (else_node != 0) blk: {
loop_scope.break_count += 1;
@@ -5356,9 +5382,10 @@ fn whileExpr(
.err_union_code;
const payload_inst = try else_scope.addUnNode(tag, cond.inst, node);
const ident_name = try astgen.identAsString(error_token);
- if (mem.eql(u8, tree.tokenSlice(error_token), "_"))
+ const ident_bytes = tree.tokenSlice(error_token);
+ if (mem.eql(u8, ident_bytes, "_"))
break :s &else_scope.base;
- try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token);
+ try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, ident_bytes);
payload_val_scope = .{
.parent = &else_scope.base,
.gen_zir = &else_scope,
@@ -5410,20 +5437,27 @@ fn forExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- for_full: ast.full.While,
+ node: Ast.Node.Index,
+ for_full: Ast.full.While,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
if (for_full.label_token) |label_token| {
try astgen.checkLabelRedefinition(scope, label_token);
}
+
// Set up variables and constants.
const is_inline = parent_gz.force_comptime or for_full.inline_token != null;
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
- const array_ptr = try expr(parent_gz, scope, .none_or_ref, for_full.ast.cond_expr);
+ const payload_is_ref = if (for_full.payload_token) |payload_token|
+ token_tags[payload_token] == .asterisk
+ else
+ false;
+
+ const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
+ const array_ptr = try expr(parent_gz, scope, cond_rl, for_full.ast.cond_expr);
const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
const index_ptr = blk: {
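
For context (not part of this commit): a `*` on the for-loop payload is what makes `payload_is_ref` true above, so the condition expression only gets a `.ref` result location when the body actually needs a pointer to each element. A minimal Zig illustration:

    // Illustrative only, not from this diff.
    test "payload capture by pointer vs by value" {
        var buf = [_]u8{ 1, 2, 3 };
        const items: []u8 = &buf;
        for (items) |*byte| byte.* += 1; // `*` payload: element mutated through a pointer
        for (items) |byte| _ = byte; // plain payload: element copied by value
    }
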
@@ -5498,7 +5532,7 @@ fn forExpr(
const name_str_index = try astgen.identAsString(ident);
const tag: Zir.Inst.Tag = if (is_ptr) .elem_ptr else .elem_val;
const payload_inst = try then_scope.addBin(tag, array_ptr, index);
- try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident);
+ try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident, value_name);
payload_val_scope = .{
.parent = &then_scope.base,
.gen_zir = &then_scope,
@@ -5518,11 +5552,12 @@ fn forExpr(
ident + 2
else
break :blk payload_sub_scope;
- if (mem.eql(u8, tree.tokenSlice(index_token), "_")) {
+ const token_bytes = tree.tokenSlice(index_token);
+ if (mem.eql(u8, token_bytes, "_")) {
return astgen.failTok(index_token, "discard of index capture; omit it instead", .{});
}
const index_name = try astgen.identAsString(index_token);
- try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token);
+ try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token, token_bytes);
index_scope = .{
.parent = payload_sub_scope,
.gen_zir = &then_scope,
@@ -5544,7 +5579,7 @@ fn forExpr(
const else_node = for_full.ast.else_expr;
const else_info: struct {
- src: ast.Node.Index,
+ src: Ast.Node.Index,
result: Zir.Inst.Ref,
} = if (else_node != 0) blk: {
loop_scope.break_count += 1;
@@ -5585,7 +5620,7 @@ fn switchExpr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- switch_node: ast.Node.Index,
+ switch_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const gpa = astgen.gpa;
@@ -5595,7 +5630,7 @@ fn switchExpr(
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const operand_node = node_datas[switch_node].lhs;
- const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
+ const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
const case_nodes = tree.extra_data[extra.start..extra.end];
// We perform two passes over the AST. This first pass is to collect information
@@ -5605,9 +5640,9 @@ fn switchExpr(
var scalar_cases_len: u32 = 0;
var multi_cases_len: u32 = 0;
var special_prong: Zir.SpecialProng = .none;
- var special_node: ast.Node.Index = 0;
- var else_src: ?ast.TokenIndex = null;
- var underscore_src: ?ast.TokenIndex = null;
+ var special_node: Ast.Node.Index = 0;
+ var else_src: ?Ast.TokenIndex = null;
+ var underscore_src: ?Ast.TokenIndex = null;
for (case_nodes) |case_node| {
const case = switch (node_tags[case_node]) {
.switch_case_one => tree.switchCaseOne(case_node),
@@ -6179,7 +6214,7 @@ fn switchExpr(
}
}
-fn ret(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -6278,7 +6313,7 @@ fn identifier(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- ident: ast.Node.Index,
+ ident: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -6294,106 +6329,101 @@ fn identifier(
}
const ident_name = try astgen.identifierTokenString(ident_token);
- if (simple_types.get(ident_name)) |zir_const_ref| {
- return rvalue(gz, rl, zir_const_ref, ident);
- }
+ if (ident_name_raw[0] != '@') {
+ if (simple_types.get(ident_name)) |zir_const_ref| {
+ return rvalue(gz, rl, zir_const_ref, ident);
+ }
- if (ident_name.len >= 2) integer: {
- const first_c = ident_name[0];
- if (first_c == 'i' or first_c == 'u') {
- const signedness: std.builtin.Signedness = switch (first_c == 'i') {
- true => .signed,
- false => .unsigned,
- };
- const bit_count = std.fmt.parseInt(u16, ident_name[1..], 10) catch |err| switch (err) {
- error.Overflow => return astgen.failNode(
- ident,
- "primitive integer type '{s}' exceeds maximum bit width of 65535",
- .{ident_name},
- ),
- error.InvalidCharacter => break :integer,
- };
- const result = try gz.add(.{
- .tag = .int_type,
- .data = .{ .int_type = .{
- .src_node = gz.nodeIndexToRelative(ident),
- .signedness = signedness,
- .bit_count = bit_count,
- } },
- });
- return rvalue(gz, rl, result, ident);
+ if (ident_name.len >= 2) integer: {
+ const first_c = ident_name[0];
+ if (first_c == 'i' or first_c == 'u') {
+ const signedness: std.builtin.Signedness = switch (first_c == 'i') {
+ true => .signed,
+ false => .unsigned,
+ };
+ const bit_count = std.fmt.parseInt(u16, ident_name[1..], 10) catch |err| switch (err) {
+ error.Overflow => return astgen.failNode(
+ ident,
+ "primitive integer type '{s}' exceeds maximum bit width of 65535",
+ .{ident_name},
+ ),
+ error.InvalidCharacter => break :integer,
+ };
+ const result = try gz.add(.{
+ .tag = .int_type,
+ .data = .{ .int_type = .{
+ .src_node = gz.nodeIndexToRelative(ident),
+ .signedness = signedness,
+ .bit_count = bit_count,
+ } },
+ });
+ return rvalue(gz, rl, result, ident);
+ }
}
}
// Local variables, including function parameters.
const name_str_index = try astgen.identAsString(ident_token);
- {
- var s = scope;
- var found_already: ?ast.Node.Index = null; // we have found a decl with the same name already
- var hit_namespace = false;
- while (true) switch (s.tag) {
- .local_val => {
- const local_val = s.cast(Scope.LocalVal).?;
-
- if (local_val.name == name_str_index) {
- local_val.used = true;
- // Captures of non-locals need to be emitted as decl_val or decl_ref.
- // This *might* be capturable depending on if it is comptime known.
- if (!hit_namespace) {
- return rvalue(gz, rl, local_val.inst, ident);
- }
+ var s = scope;
+ var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
+ var hit_namespace: Ast.Node.Index = 0;
+ while (true) switch (s.tag) {
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+
+ if (local_val.name == name_str_index) {
+ local_val.used = true;
+ // Locals cannot shadow anything, so we do not need to look for ambiguous
+ // references in this case.
+ return rvalue(gz, rl, local_val.inst, ident);
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == name_str_index) {
+ local_ptr.used = true;
+ if (hit_namespace != 0 and !local_ptr.maybe_comptime) {
+ return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{
+ try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}),
+ try astgen.errNoteNode(hit_namespace, "crosses namespace boundary here", .{}),
+ });
}
- s = local_val.parent;
- },
- .local_ptr => {
- const local_ptr = s.cast(Scope.LocalPtr).?;
- if (local_ptr.name == name_str_index) {
- local_ptr.used = true;
- if (hit_namespace) {
- if (local_ptr.maybe_comptime)
- break
- else
- return astgen.failNodeNotes(ident, "'{s}' not accessible from inner function", .{ident_name}, &.{
- try astgen.errNoteTok(local_ptr.token_src, "declared here", .{}),
- // TODO add crossed function definition here note.
- // Maybe add a note to the error about it being because of the var,
- // maybe recommend copying it into a const variable. -SpexGuy
- });
- }
- switch (rl) {
- .ref, .none_or_ref => return local_ptr.ptr,
- else => {
- const loaded = try gz.addUnNode(.load, local_ptr.ptr, ident);
- return rvalue(gz, rl, loaded, ident);
- },
- }
+ switch (rl) {
+ .ref, .none_or_ref => return local_ptr.ptr,
+ else => {
+ const loaded = try gz.addUnNode(.load, local_ptr.ptr, ident);
+ return rvalue(gz, rl, loaded, ident);
+ },
}
- s = local_ptr.parent;
- },
- .gen_zir => s = s.cast(GenZir).?.parent,
- .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
- // look for ambiguous references to decls
- .namespace => {
- const ns = s.cast(Scope.Namespace).?;
- if (ns.decls.get(name_str_index)) |i| {
- if (found_already) |f|
- return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{
- try astgen.errNoteNode(i, "declared here", .{}),
- try astgen.errNoteNode(f, "also declared here", .{}),
- })
- else
- found_already = i;
+ }
+ s = local_ptr.parent;
+ },
+ .gen_zir => s = s.cast(GenZir).?.parent,
+ .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+ .namespace => {
+ const ns = s.cast(Scope.Namespace).?;
+ if (ns.decls.get(name_str_index)) |i| {
+ if (found_already) |f| {
+ return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{
+ try astgen.errNoteNode(f, "declared here", .{}),
+ try astgen.errNoteNode(i, "also declared here", .{}),
+ });
}
- hit_namespace = true;
- s = ns.parent;
- },
- .top => break,
- };
+ // We found a match but must continue looking for ambiguous references to decls.
+ found_already = i;
+ }
+ hit_namespace = ns.node;
+ s = ns.parent;
+ },
+ .top => break,
+ };
+ if (found_already == null) {
+ return astgen.failNode(ident, "use of undeclared identifier '{s}'", .{ident_name});
}
- // We can't look up Decls until Sema because the same ZIR code is supposed to be
- // used for multiple generic instantiations, and this may refer to a different Decl
- // depending on the scope, determined by the generic instantiation.
+ // Decl references happen by name rather than ZIR index so that when unrelated
+ // decls are modified, ZIR code containing references to them can remain unmodified.
switch (rl) {
.ref, .none_or_ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
else => {
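
For illustration, the rewritten resolution above records the namespace node it crossed, so referencing a runtime-mutable local from inside a nested namespace now produces a dedicated error that points at the boundary. A minimal sketch of code that hits this path (message wording from the format strings above; exact note rendering assumed):

    fn outer() void {
        var x: i32 = 1;
        const S = struct {
            fn inner() i32 {
                // error: mutable 'x' not accessible from here
                // note: declared mutable here
                // note: crosses namespace boundary here
                return x;
            }
        };
        _ = S;
        x += 1;
    }
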
@@ -6406,7 +6436,7 @@ fn identifier(
fn stringLiteral(
gz: *GenZir,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -6426,7 +6456,7 @@ fn stringLiteral(
fn multilineStringLiteral(
gz: *GenZir,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const str = try astgen.strLitNodeAsString(node);
@@ -6440,7 +6470,7 @@ fn multilineStringLiteral(
return rvalue(gz, rl, result, node);
}
-fn charLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
+fn charLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -6519,7 +6549,7 @@ fn charLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) !Zir.Inst.Ref {
}
}
-fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -6565,7 +6595,7 @@ fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Z
return rvalue(gz, rl, result, node);
}
-fn floatLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir.Inst.Ref {
+fn floatLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const main_tokens = tree.nodes.items(.main_token);
@@ -6605,8 +6635,8 @@ fn asmExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- full: ast.full.Asm,
+ node: Ast.Node.Index,
+ full: Ast.full.Asm,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -6763,9 +6793,9 @@ fn as(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs: ast.Node.Index,
- rhs: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs: Ast.Node.Index,
+ rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
@@ -6786,8 +6816,8 @@ fn unionInit(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- params: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ params: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const union_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
@@ -6812,9 +6842,9 @@ fn unionInit(
fn unionInitRlPtr(
parent_gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
result_ptr: Zir.Inst.Ref,
- expr_node: ast.Node.Index,
+ expr_node: Ast.Node.Index,
union_type: Zir.Inst.Ref,
field_name: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
@@ -6831,9 +6861,9 @@ fn asRlPtr(
parent_gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
result_ptr: Zir.Inst.Ref,
- operand_node: ast.Node.Index,
+ operand_node: Ast.Node.Index,
dest_type: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
// Detect whether this expr() call goes into rvalue() to store the result into the
@@ -6871,9 +6901,9 @@ fn bitCast(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs: ast.Node.Index,
- rhs: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs: Ast.Node.Index,
+ rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const dest_type = try typeExpr(gz, scope, lhs);
@@ -6901,10 +6931,10 @@ fn bitCast(
fn bitCastRlPtr(
gz: *GenZir,
scope: *Scope,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
dest_type: Zir.Inst.Ref,
result_ptr: Zir.Inst.Ref,
- rhs: ast.Node.Index,
+ rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const casted_result_ptr = try gz.addPlNode(.bitcast_result_ptr, node, Zir.Inst.Bin{
.lhs = dest_type,
@@ -6917,8 +6947,8 @@ fn typeOf(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- params: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ params: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
if (params.len < 1) {
return gz.astgen.failNode(node, "expected at least 1 argument, found 0", .{});
@@ -6942,8 +6972,8 @@ fn builtinCall(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- params: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ params: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -7275,6 +7305,11 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
+ .add_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .add_with_saturation),
+ .sub_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .sub_with_saturation),
+ .mul_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .mul_with_saturation),
+ .shl_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .shl_with_saturation),
+
.atomic_load => {
const int_type = try typeExpr(gz, scope, params[0]);
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
@@ -7430,7 +7465,7 @@ fn builtinCall(
fn simpleNoOpVoid(
gz: *GenZir,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = try gz.addNode(tag, node);
@@ -7441,9 +7476,9 @@ fn hasDeclOrField(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs_node: ast.Node.Index,
- rhs_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs_node: Ast.Node.Index,
+ rhs_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const container_type = try typeExpr(gz, scope, lhs_node);
@@ -7459,9 +7494,9 @@ fn typeCast(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs_node: ast.Node.Index,
- rhs_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs_node: Ast.Node.Index,
+ rhs_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
@@ -7475,8 +7510,8 @@ fn simpleUnOpType(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- operand_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ operand_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const operand = try typeExpr(gz, scope, operand_node);
@@ -7488,9 +7523,9 @@ fn simpleUnOp(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
operand_rl: ResultLoc,
- operand_node: ast.Node.Index,
+ operand_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const operand = try expr(gz, scope, operand_rl, operand_node);
@@ -7502,8 +7537,8 @@ fn cmpxchg(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- params: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ params: []const Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const int_type = try typeExpr(gz, scope, params[0]);
@@ -7532,9 +7567,9 @@ fn bitBuiltin(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- int_type_node: ast.Node.Index,
- operand_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ int_type_node: Ast.Node.Index,
+ operand_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const int_type = try typeExpr(gz, scope, int_type_node);
@@ -7547,9 +7582,9 @@ fn divBuiltin(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs_node: ast.Node.Index,
- rhs_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs_node: Ast.Node.Index,
+ rhs_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
@@ -7563,8 +7598,8 @@ fn simpleCBuiltin(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- operand_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ operand_node: Ast.Node.Index,
tag: Zir.Inst.Extended,
) InnerError!Zir.Inst.Ref {
const operand = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, operand_node);
@@ -7579,9 +7614,9 @@ fn offsetOf(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs_node: ast.Node.Index,
- rhs_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs_node: Ast.Node.Index,
+ rhs_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const type_inst = try typeExpr(gz, scope, lhs_node);
@@ -7597,9 +7632,9 @@ fn shiftOp(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- lhs_node: ast.Node.Index,
- rhs_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ lhs_node: Ast.Node.Index,
+ rhs_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const lhs = try expr(gz, scope, .none, lhs_node);
@@ -7616,8 +7651,8 @@ fn cImport(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- body_node: ast.Node.Index,
+ node: Ast.Node.Index,
+ body_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@@ -7641,8 +7676,8 @@ fn overflowArithmetic(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- params: []const ast.Node.Index,
+ node: Ast.Node.Index,
+ params: []const Ast.Node.Index,
tag: Zir.Inst.Extended,
) InnerError!Zir.Inst.Ref {
const int_type = try typeExpr(gz, scope, params[0]);
@@ -7667,12 +7702,30 @@ fn overflowArithmetic(
return rvalue(gz, rl, result, node);
}
+fn saturatingArithmetic(
+ gz: *GenZir,
+ scope: *Scope,
+ rl: ResultLoc,
+ node: Ast.Node.Index,
+ params: []const Ast.Node.Index,
+ tag: Zir.Inst.Extended,
+) InnerError!Zir.Inst.Ref {
+ const lhs = try expr(gz, scope, .none, params[0]);
+ const rhs = try expr(gz, scope, .none, params[1]);
+ const result = try gz.addExtendedPayload(tag, Zir.Inst.SaturatingArithmetic{
+ .node = gz.nodeIndexToRelative(node),
+ .lhs = lhs,
+ .rhs = rhs,
+ });
+ return rvalue(gz, rl, result, node);
+}
+
fn callExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
- node: ast.Node.Index,
- call: ast.full.Call,
+ node: Ast.Node.Index,
+ call: Ast.full.Call,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const lhs = try expr(gz, scope, .none, call.ast.fn_expr);
@@ -7761,7 +7814,7 @@ pub const simple_types = std.ComptimeStringMap(Zir.Inst.Ref, .{
.{ "void", .void_type },
});
-fn nodeMayNeedMemoryLocation(tree: *const ast.Tree, start_node: ast.Node.Index) bool {
+fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@@ -7819,10 +7872,6 @@ fn nodeMayNeedMemoryLocation(tree: *const ast.Tree, start_node: ast.Node.Index)
.string_literal,
.multiline_string_literal,
.char_literal,
- .true_literal,
- .false_literal,
- .null_literal,
- .undefined_literal,
.unreachable_literal,
.identifier,
.error_set_decl,
@@ -7974,7 +8023,7 @@ fn nodeMayNeedMemoryLocation(tree: *const ast.Tree, start_node: ast.Node.Index)
}
}
-fn nodeMayEvalToError(tree: *const ast.Tree, start_node: ast.Node.Index) enum { never, always, maybe } {
+fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never, always, maybe } {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@@ -8059,10 +8108,6 @@ fn nodeMayEvalToError(tree: *const ast.Tree, start_node: ast.Node.Index) enum {
.string_literal,
.multiline_string_literal,
.char_literal,
- .true_literal,
- .false_literal,
- .null_literal,
- .undefined_literal,
.unreachable_literal,
.error_set_decl,
.container_decl,
@@ -8187,7 +8232,7 @@ fn nodeMayEvalToError(tree: *const ast.Tree, start_node: ast.Node.Index) enum {
}
}
-fn nodeImpliesRuntimeBits(tree: *const ast.Tree, start_node: ast.Node.Index) bool {
+fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
@@ -8232,10 +8277,6 @@ fn nodeImpliesRuntimeBits(tree: *const ast.Tree, start_node: ast.Node.Index) boo
.string_literal,
.multiline_string_literal,
.char_literal,
- .true_literal,
- .false_literal,
- .null_literal,
- .undefined_literal,
.unreachable_literal,
.identifier,
.error_set_decl,
@@ -8370,7 +8411,7 @@ fn nodeImpliesRuntimeBits(tree: *const ast.Tree, start_node: ast.Node.Index) boo
}
}
-/// Applies `rl` semantics to `inst`. Expressions which do not do their own handling of
+/// Applies `rl` semantics to `result`. Expressions which do not do their own handling of
/// result locations must call this function on their result.
/// As an example, if the `ResultLoc` is `ptr`, it will write the result to the pointer.
/// If the `ResultLoc` is `ty`, it will coerce the result to the type.
@@ -8378,8 +8419,9 @@ fn rvalue(
gz: *GenZir,
rl: ResultLoc,
result: Zir.Inst.Ref,
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
+ if (gz.endsWithNoReturn()) return result;
switch (rl) {
.none, .none_or_ref, .coerced_ty => return result,
.discard => {
@@ -8482,7 +8524,7 @@ fn rvalue(
/// and allocates the result within `astgen.arena`.
/// Otherwise, returns a reference to the source code bytes directly.
/// See also `appendIdentStr` and `parseStrLit`.
-fn identifierTokenString(astgen: *AstGen, token: ast.TokenIndex) InnerError![]const u8 {
+fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]const u8 {
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
assert(token_tags[token] == .identifier);
@@ -8502,7 +8544,7 @@ fn identifierTokenString(astgen: *AstGen, token: ast.TokenIndex) InnerError![]co
/// See also `identifierTokenString` and `parseStrLit`.
fn appendIdentStr(
astgen: *AstGen,
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
buf: *ArrayListUnmanaged(u8),
) InnerError!void {
const tree = astgen.tree;
@@ -8519,7 +8561,7 @@ fn appendIdentStr(
/// Appends the result to `buf`.
fn parseStrLit(
astgen: *AstGen,
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
buf: *ArrayListUnmanaged(u8),
bytes: []const u8,
offset: u32,
@@ -8583,7 +8625,7 @@ fn parseStrLit(
fn failNode(
astgen: *AstGen,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
comptime format: []const u8,
args: anytype,
) InnerError {
@@ -8592,7 +8634,7 @@ fn failNode(
fn failNodeNotes(
astgen: *AstGen,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
comptime format: []const u8,
args: anytype,
notes: []const u32,
@@ -8624,7 +8666,7 @@ fn failNodeNotes(
fn failTok(
astgen: *AstGen,
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
comptime format: []const u8,
args: anytype,
) InnerError {
@@ -8633,7 +8675,7 @@ fn failTok(
fn failTokNotes(
astgen: *AstGen,
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
comptime format: []const u8,
args: anytype,
notes: []const u32,
@@ -8666,7 +8708,7 @@ fn failTokNotes(
/// Same as `fail`, except given an absolute byte offset.
fn failOff(
astgen: *AstGen,
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
byte_offset: u32,
comptime format: []const u8,
args: anytype,
@@ -8691,7 +8733,7 @@ fn failOff(
fn errNoteTok(
astgen: *AstGen,
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
comptime format: []const u8,
args: anytype,
) Allocator.Error!u32 {
@@ -8714,7 +8756,7 @@ fn errNoteTok(
fn errNoteNode(
astgen: *AstGen,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
comptime format: []const u8,
args: anytype,
) Allocator.Error!u32 {
@@ -8735,22 +8777,22 @@ fn errNoteNode(
});
}
-fn identAsString(astgen: *AstGen, ident_token: ast.TokenIndex) !u32 {
+fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 {
const gpa = astgen.gpa;
const string_bytes = &astgen.string_bytes;
const str_index = @intCast(u32, string_bytes.items.len);
try astgen.appendIdentStr(ident_token, string_bytes);
const key = string_bytes.items[str_index..];
- const gop = try astgen.string_table.getOrPut(gpa, key);
+ const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{
+ .bytes = string_bytes,
+ }, StringIndexContext{
+ .bytes = string_bytes,
+ });
if (gop.found_existing) {
string_bytes.shrinkRetainingCapacity(str_index);
- return gop.value_ptr.*;
+ return gop.key_ptr.*;
} else {
- // We have to dupe the key into the arena, otherwise the memory
- // becomes invalidated when string_bytes gets data appended.
- // TODO https://github.com/ziglang/zig/issues/8528
- gop.key_ptr.* = try astgen.arena.dupe(u8, key);
- gop.value_ptr.* = str_index;
+ gop.key_ptr.* = str_index;
try string_bytes.append(gpa, 0);
return str_index;
}
@@ -8758,26 +8800,26 @@ fn identAsString(astgen: *AstGen, ident_token: ast.TokenIndex) !u32 {
const IndexSlice = struct { index: u32, len: u32 };
-fn strLitAsString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !IndexSlice {
+fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
const gpa = astgen.gpa;
const string_bytes = &astgen.string_bytes;
const str_index = @intCast(u32, string_bytes.items.len);
const token_bytes = astgen.tree.tokenSlice(str_lit_token);
try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
const key = string_bytes.items[str_index..];
- const gop = try astgen.string_table.getOrPut(gpa, key);
+ const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{
+ .bytes = string_bytes,
+ }, StringIndexContext{
+ .bytes = string_bytes,
+ });
if (gop.found_existing) {
string_bytes.shrinkRetainingCapacity(str_index);
return IndexSlice{
- .index = gop.value_ptr.*,
+ .index = gop.key_ptr.*,
.len = @intCast(u32, key.len),
};
} else {
- // We have to dupe the key into the arena, otherwise the memory
- // becomes invalidated when string_bytes gets data appended.
- // TODO https://github.com/ziglang/zig/issues/8528
- gop.key_ptr.* = try astgen.arena.dupe(u8, key);
- gop.value_ptr.* = str_index;
+ gop.key_ptr.* = str_index;
// Still need a null byte because we are using the same table
// to lookup null terminated strings, so if we get a match, it has to
// be null terminated for that to work.
@@ -8789,7 +8831,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !IndexSlice {
}
}
-fn strLitNodeAsString(astgen: *AstGen, node: ast.Node.Index) !IndexSlice {
+fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice {
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@@ -8824,7 +8866,7 @@ fn strLitNodeAsString(astgen: *AstGen, node: ast.Node.Index) !IndexSlice {
};
}
-fn testNameString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !u32 {
+fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !u32 {
const gpa = astgen.gpa;
const string_bytes = &astgen.string_bytes;
const str_index = @intCast(u32, string_bytes.items.len);
@@ -8881,7 +8923,7 @@ const Scope = struct {
gen_zir: *GenZir,
inst: Zir.Inst.Ref,
/// Source location of the corresponding variable declaration.
- token_src: ast.TokenIndex,
+ token_src: Ast.TokenIndex,
/// String table index.
name: u32,
id_cat: IdCat,
@@ -8900,7 +8942,7 @@ const Scope = struct {
gen_zir: *GenZir,
ptr: Zir.Inst.Ref,
/// Source location of the corresponding variable declaration.
- token_src: ast.TokenIndex,
+ token_src: Ast.TokenIndex,
/// String table index.
name: u32,
id_cat: IdCat,
@@ -8915,7 +8957,7 @@ const Scope = struct {
base: Scope,
/// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`.
parent: *Scope,
- defer_node: ast.Node.Index,
+ defer_node: Ast.Node.Index,
};
/// Represents a global scope that has any number of declarations in it.
@@ -8927,7 +8969,8 @@ const Scope = struct {
parent: *Scope,
/// Maps string table index to the source location of declaration,
/// for the purposes of reporting name shadowing compile errors.
- decls: std.AutoHashMapUnmanaged(u32, ast.Node.Index) = .{},
+ decls: std.AutoHashMapUnmanaged(u32, Ast.Node.Index) = .{},
+ node: Ast.Node.Index,
};
const Top = struct {
@@ -8946,7 +8989,7 @@ const GenZir = struct {
/// How decls created in this scope should be named.
anon_name_strategy: Zir.Inst.NameStrategy = .anon,
/// The containing decl AST node.
- decl_node_index: ast.Node.Index,
+ decl_node_index: Ast.Node.Index,
/// The containing decl line index, absolute.
decl_line: u32,
parent: *Scope,
@@ -8981,8 +9024,8 @@ const GenZir = struct {
/// a result location pointer.
labeled_store_to_block_ptr_list: ArrayListUnmanaged(Zir.Inst.Index) = .{},
- suspend_node: ast.Node.Index = 0,
- nosuspend_node: ast.Node.Index = 0,
+ suspend_node: Ast.Node.Index = 0,
+ nosuspend_node: Ast.Node.Index = 0,
fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir {
return .{
@@ -8998,7 +9041,7 @@ const GenZir = struct {
}
const Label = struct {
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
block_inst: Zir.Inst.Index,
used: bool = false,
};
@@ -9019,7 +9062,7 @@ const GenZir = struct {
return false;
}
- fn calcLine(gz: GenZir, node: ast.Node.Index) u32 {
+ fn calcLine(gz: GenZir, node: Ast.Node.Index) u32 {
const astgen = gz.astgen;
const tree = astgen.tree;
const source = tree.source;
@@ -9031,23 +9074,15 @@ const GenZir = struct {
return @intCast(u32, gz.decl_line + astgen.source_line);
}
- fn tokSrcLoc(gz: GenZir, token_index: ast.TokenIndex) LazySrcLoc {
- return .{ .token_offset = token_index - gz.srcToken() };
- }
-
- fn nodeSrcLoc(gz: GenZir, node_index: ast.Node.Index) LazySrcLoc {
- return .{ .node_offset = gz.nodeIndexToRelative(node_index) };
- }
-
- fn nodeIndexToRelative(gz: GenZir, node_index: ast.Node.Index) i32 {
+ fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 {
return @bitCast(i32, node_index) - @bitCast(i32, gz.decl_node_index);
}
- fn tokenIndexToRelative(gz: GenZir, token: ast.TokenIndex) u32 {
+ fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 {
return token - gz.srcToken();
}
- fn srcToken(gz: GenZir) ast.TokenIndex {
+ fn srcToken(gz: GenZir) Ast.TokenIndex {
return gz.astgen.tree.firstToken(gz.decl_node_index);
}
@@ -9132,7 +9167,7 @@ const GenZir = struct {
}
fn addFunc(gz: *GenZir, args: struct {
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
body: []const Zir.Inst.Index,
param_block: Zir.Inst.Index,
ret_ty: []const Zir.Inst.Index,
@@ -9325,7 +9360,7 @@ const GenZir = struct {
callee: Zir.Inst.Ref,
args: []const Zir.Inst.Ref,
/// Absolute node index. This function does the conversion to offset from Decl.
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
) !Zir.Inst.Ref {
assert(callee != .none);
assert(src_node != 0);
@@ -9416,7 +9451,7 @@ const GenZir = struct {
tag: Zir.Inst.Tag,
operand: Zir.Inst.Ref,
/// Absolute node index. This function does the conversion to offset from Decl.
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
) !Zir.Inst.Ref {
assert(operand != .none);
return gz.add(.{
@@ -9432,7 +9467,7 @@ const GenZir = struct {
gz: *GenZir,
tag: Zir.Inst.Tag,
/// Absolute node index. This function does the conversion to offset from Decl.
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
extra: anytype,
) !Zir.Inst.Ref {
const gpa = gz.astgen.gpa;
@@ -9456,7 +9491,7 @@ const GenZir = struct {
gz: *GenZir,
tag: Zir.Inst.Tag,
/// Absolute token index. This function does the conversion to Decl offset.
- abs_tok_index: ast.TokenIndex,
+ abs_tok_index: Ast.TokenIndex,
name: u32,
body: []const u32,
) !Zir.Inst.Index {
@@ -9511,7 +9546,7 @@ const GenZir = struct {
fn addExtendedMultiOp(
gz: *GenZir,
opcode: Zir.Inst.Extended,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
operands: []const Zir.Inst.Ref,
) !Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -9572,7 +9607,7 @@ const GenZir = struct {
tag: Zir.Inst.Tag,
operand: Zir.Inst.Ref,
/// Absolute token index. This function does the conversion to Decl offset.
- abs_tok_index: ast.TokenIndex,
+ abs_tok_index: Ast.TokenIndex,
) !Zir.Inst.Ref {
assert(operand != .none);
return gz.add(.{
@@ -9589,7 +9624,7 @@ const GenZir = struct {
tag: Zir.Inst.Tag,
str_index: u32,
/// Absolute token index. This function does the conversion to Decl offset.
- abs_tok_index: ast.TokenIndex,
+ abs_tok_index: Ast.TokenIndex,
) !Zir.Inst.Ref {
return gz.add(.{
.tag = tag,
@@ -9636,7 +9671,7 @@ const GenZir = struct {
gz: *GenZir,
tag: Zir.Inst.Tag,
decl_index: u32,
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
) !Zir.Inst.Ref {
return gz.add(.{
.tag = tag,
@@ -9651,7 +9686,7 @@ const GenZir = struct {
gz: *GenZir,
tag: Zir.Inst.Tag,
/// Absolute node index. This function does the conversion to offset from Decl.
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
) !Zir.Inst.Ref {
return gz.add(.{
.tag = tag,
@@ -9663,7 +9698,7 @@ const GenZir = struct {
gz: *GenZir,
opcode: Zir.Inst.Extended,
/// Absolute node index. This function does the conversion to offset from Decl.
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
) !Zir.Inst.Ref {
return gz.add(.{
.tag = .extended,
@@ -9679,7 +9714,7 @@ const GenZir = struct {
gz: *GenZir,
args: struct {
/// Absolute node index. This function does the conversion to offset from Decl.
- node: ast.Node.Index,
+ node: Ast.Node.Index,
type_inst: Zir.Inst.Ref,
align_inst: Zir.Inst.Ref,
is_const: bool,
@@ -9730,7 +9765,7 @@ const GenZir = struct {
gz: *GenZir,
args: struct {
/// Absolute node index. This function does the conversion to offset from Decl.
- node: ast.Node.Index,
+ node: Ast.Node.Index,
asm_source: u32,
output_type_bits: u32,
is_volatile: bool,
@@ -9787,7 +9822,7 @@ const GenZir = struct {
/// Note that this returns a `Zir.Inst.Index` not a ref.
/// Does *not* append the block instruction to the scope.
/// Leaves the `payload_index` field undefined.
- fn addBlock(gz: *GenZir, tag: Zir.Inst.Tag, node: ast.Node.Index) !Zir.Inst.Index {
+ fn addBlock(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index {
const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
const gpa = gz.astgen.gpa;
try gz.astgen.instructions.append(gpa, .{
@@ -9802,7 +9837,7 @@ const GenZir = struct {
/// Note that this returns a `Zir.Inst.Index` not a ref.
/// Leaves the `payload_index` field undefined.
- fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: ast.Node.Index) !Zir.Inst.Index {
+ fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index {
const gpa = gz.astgen.gpa;
try gz.instructions.ensureUnusedCapacity(gpa, 1);
const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
@@ -9818,7 +9853,7 @@ const GenZir = struct {
}
fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
body_len: u32,
fields_len: u32,
decls_len: u32,
@@ -9863,7 +9898,7 @@ const GenZir = struct {
}
fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
tag_type: Zir.Inst.Ref,
body_len: u32,
fields_len: u32,
@@ -9913,7 +9948,7 @@ const GenZir = struct {
}
fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
tag_type: Zir.Inst.Ref,
body_len: u32,
fields_len: u32,
@@ -9986,7 +10021,7 @@ const GenZir = struct {
return new_index;
}
- fn addRet(gz: *GenZir, rl: ResultLoc, operand: Zir.Inst.Ref, node: ast.Node.Index) !void {
+ fn addRet(gz: *GenZir, rl: ResultLoc, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void {
switch (rl) {
.ptr => |ret_ptr| _ = try gz.addUnNode(.ret_load, ret_ptr, node),
.ty => _ = try gz.addUnNode(.ret_node, operand, node),
@@ -10001,37 +10036,16 @@ fn nullTerminatedString(astgen: AstGen, index: usize) [*:0]const u8 {
return @ptrCast([*:0]const u8, astgen.string_bytes.items.ptr) + index;
}
-fn declareNewName(
- astgen: *AstGen,
- start_scope: *Scope,
- name_index: u32,
- node: ast.Node.Index,
-) !void {
- const gpa = astgen.gpa;
- var scope = start_scope;
- while (true) {
- switch (scope.tag) {
- .gen_zir => scope = scope.cast(GenZir).?.parent,
- .local_val => scope = scope.cast(Scope.LocalVal).?.parent,
- .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
- .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
- .namespace => {
- const ns = scope.cast(Scope.Namespace).?;
- const gop = try ns.decls.getOrPut(gpa, name_index);
- if (gop.found_existing) {
- const name = try gpa.dupe(u8, mem.spanZ(astgen.nullTerminatedString(name_index)));
- defer gpa.free(name);
- return astgen.failNodeNotes(node, "redeclaration of '{s}'", .{
- name,
- }, &[_]u32{
- try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}),
- });
- }
- gop.value_ptr.* = node;
- break;
- },
- .top => break,
- }
+fn isPrimitive(name: []const u8) bool {
+ if (simple_types.get(name) != null) return true;
+ if (name.len < 2) return false;
+ const first_c = name[0];
+ if (first_c != 'i' and first_c != 'u') return false;
+ if (std.fmt.parseInt(u16, name[1..], 10)) |_| {
+ return true;
+ } else |err| switch (err) {
+ error.Overflow => return true,
+ error.InvalidCharacter => return false,
}
}
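
For reference, `isPrimitive` treats a name as primitive when it is either in `simple_types` or shaped like an arbitrary-width integer type; widths that overflow a u16 still count here and are rejected later with the bit-width error above. A hedged sketch of the behavior, written as a test assumed to live next to the function:

    test "isPrimitive (illustrative)" {
        try std.testing.expect(isPrimitive("u8"));
        try std.testing.expect(isPrimitive("i32"));
        // A width too large for u16 is still considered a primitive name.
        try std.testing.expect(isPrimitive("u999999999999"));
        // Starts with 'u' but the rest is not a decimal width.
        try std.testing.expect(!isPrimitive("usize_like"));
        try std.testing.expect(!isPrimitive("foo"));
    }
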
@@ -10040,16 +10054,27 @@ fn detectLocalShadowing(
astgen: *AstGen,
scope: *Scope,
ident_name: u32,
- name_token: ast.TokenIndex,
+ name_token: Ast.TokenIndex,
+ token_bytes: []const u8,
) !void {
const gpa = astgen.gpa;
+ if (token_bytes[0] != '@' and isPrimitive(token_bytes)) {
+ return astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{
+ token_bytes,
+ }, &[_]u32{
+ try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{
+ token_bytes,
+ }),
+ });
+ }
var s = scope;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
if (local_val.name == ident_name) {
- const name = try gpa.dupe(u8, mem.spanZ(astgen.nullTerminatedString(ident_name)));
+ const name_slice = mem.span(astgen.nullTerminatedString(ident_name));
+ const name = try gpa.dupe(u8, name_slice);
defer gpa.free(name);
return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{
@tagName(local_val.id_cat), name,
@@ -10066,7 +10091,8 @@ fn detectLocalShadowing(
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
if (local_ptr.name == ident_name) {
- const name = try gpa.dupe(u8, mem.spanZ(astgen.nullTerminatedString(ident_name)));
+ const name_slice = mem.span(astgen.nullTerminatedString(ident_name));
+ const name = try gpa.dupe(u8, name_slice);
defer gpa.free(name);
return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{
@tagName(local_ptr.id_cat), name,
@@ -10086,7 +10112,8 @@ fn detectLocalShadowing(
s = ns.parent;
continue;
};
- const name = try gpa.dupe(u8, mem.spanZ(astgen.nullTerminatedString(ident_name)));
+ const name_slice = mem.span(astgen.nullTerminatedString(ident_name));
+ const name = try gpa.dupe(u8, name_slice);
defer gpa.free(name);
return astgen.failTokNotes(name_token, "local shadows declaration of '{s}'", .{
name,
@@ -10131,3 +10158,67 @@ fn refToIndex(inst: Zir.Inst.Ref) ?Zir.Inst.Index {
return null;
}
}
+
+fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !void {
+ const gpa = astgen.gpa;
+ const tree = astgen.tree;
+ const node_tags = tree.nodes.items(.tag);
+ const main_tokens = tree.nodes.items(.main_token);
+ const token_tags = tree.tokens.items(.tag);
+ for (members) |member_node| {
+ const name_token = switch (node_tags[member_node]) {
+ .fn_proto_simple,
+ .fn_proto_multi,
+ .fn_proto_one,
+ .fn_proto,
+ .global_var_decl,
+ .local_var_decl,
+ .simple_var_decl,
+ .aligned_var_decl,
+ => main_tokens[member_node] + 1,
+
+ .fn_decl => blk: {
+ const ident = main_tokens[member_node] + 1;
+ if (token_tags[ident] != .identifier) {
+ switch (astgen.failNode(member_node, "missing function name", .{})) {
+ error.AnalysisFail => continue,
+ error.OutOfMemory => return error.OutOfMemory,
+ }
+ }
+ break :blk ident;
+ },
+
+ else => continue,
+ };
+
+ const token_bytes = astgen.tree.tokenSlice(name_token);
+ if (token_bytes[0] != '@' and isPrimitive(token_bytes)) {
+ switch (astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{
+ token_bytes,
+ }, &[_]u32{
+ try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{
+ token_bytes,
+ }),
+ })) {
+ error.AnalysisFail => continue,
+ error.OutOfMemory => return error.OutOfMemory,
+ }
+ }
+
+ const name_str_index = try astgen.identAsString(name_token);
+ const gop = try namespace.decls.getOrPut(gpa, name_str_index);
+ if (gop.found_existing) {
+ const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(name_str_index)));
+ defer gpa.free(name);
+ switch (astgen.failNodeNotes(member_node, "redeclaration of '{s}'", .{
+ name,
+ }, &[_]u32{
+ try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}),
+ })) {
+ error.AnalysisFail => continue,
+ error.OutOfMemory => return error.OutOfMemory,
+ }
+ }
+ gop.value_ptr.* = member_node;
+ }
+}
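
Taken together, the new `scanDecls` pass reports redeclarations and primitive shadowing during AstGen, before Sema ever runs. A sketch of container-level code that is now rejected (messages from the format strings above; note text assumed to render as shown):

    // error: name shadows primitive 'u8'
    // note: consider using @"u8" to disambiguate
    const u8 = 255;

    // error: redeclaration of 'foo'
    // note: other declaration here
    fn foo() void {}
    const foo = 1;
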
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 8f23ec86d7..e415d27a3a 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -2,6 +2,7 @@ const std = @import("std");
pub const Tag = enum {
add_with_overflow,
+ add_with_saturation,
align_cast,
align_of,
as,
@@ -65,6 +66,7 @@ pub const Tag = enum {
wasm_memory_grow,
mod,
mul_with_overflow,
+ mul_with_saturation,
panic,
pop_count,
ptr_cast,
@@ -79,10 +81,12 @@ pub const Tag = enum {
set_runtime_safety,
shl_exact,
shl_with_overflow,
+ shl_with_saturation,
shr_exact,
shuffle,
size_of,
splat,
+ sub_with_saturation,
reduce,
src,
sqrt,
@@ -528,6 +532,34 @@ pub const list = list: {
},
},
.{
+ "@addWithSaturation",
+ .{
+ .tag = .add_with_saturation,
+ .param_count = 2,
+ },
+ },
+ .{
+ "@subWithSaturation",
+ .{
+ .tag = .sub_with_saturation,
+ .param_count = 2,
+ },
+ },
+ .{
+ "@mulWithSaturation",
+ .{
+ .tag = .mul_with_saturation,
+ .param_count = 2,
+ },
+ },
+ .{
+ "@shlWithSaturation",
+ .{
+ .tag = .shl_with_saturation,
+ .param_count = 2,
+ },
+ },
+ .{
"@memcpy",
.{
.tag = .memcpy,
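
The four saturating builtins registered above each take two operands and are routed through `saturatingArithmetic` in AstGen and `zirSatArithmetic` in Sema. A usage sketch, assuming results clamp at the operand type's bounds rather than wrapping:

    const std = @import("std");

    test "saturating builtins (illustrative)" {
        const a: u8 = 200;
        const b: u8 = 100;
        // 200 + 100 exceeds u8, so the result clamps to 255 instead of wrapping.
        try std.testing.expectEqual(@as(u8, 255), @addWithSaturation(a, b));
        // 100 - 200 would underflow, so the result clamps to 0.
        try std.testing.expectEqual(@as(u8, 0), @subWithSaturation(b, a));
    }
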
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7dc726aeb2..26c8bf2a70 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -708,6 +708,10 @@ pub const InitOptions = struct {
disable_c_depfile: bool = false,
linker_z_nodelete: bool = false,
linker_z_defs: bool = false,
+ linker_z_origin: bool = false,
+ linker_z_noexecstack: bool = false,
+ linker_z_now: bool = false,
+ linker_z_relro: bool = false,
linker_tsaware: bool = false,
linker_nxcompat: bool = false,
linker_dynamicbase: bool = false,
@@ -1382,6 +1386,10 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.bind_global_refs_locally = options.linker_bind_global_refs_locally orelse false,
.z_nodelete = options.linker_z_nodelete,
.z_defs = options.linker_z_defs,
+ .z_origin = options.linker_z_origin,
+ .z_noexecstack = options.linker_z_noexecstack,
+ .z_now = options.linker_z_now,
+ .z_relro = options.linker_z_relro,
.tsaware = options.linker_tsaware,
.nxcompat = options.linker_nxcompat,
.dynamicbase = options.linker_dynamicbase,
@@ -2408,7 +2416,7 @@ const AstGenSrc = union(enum) {
root,
import: struct {
importing_file: *Module.Scope.File,
- import_tok: std.zig.ast.TokenIndex,
+ import_tok: std.zig.Ast.TokenIndex,
},
};
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 6a47bfe597..2f1ecc9c43 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -231,6 +231,7 @@ fn analyzeInst(
.mul,
.mulwrap,
.div,
+ .rem,
.ptr_add,
.ptr_sub,
.bit_and,
diff --git a/src/Module.zig b/src/Module.zig
index 4ed39c9954..07b86c0d51 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -11,7 +11,7 @@ const log = std.log.scoped(.module);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
-const ast = std.zig.ast;
+const Ast = std.zig.Ast;
const Module = @This();
const Compilation = @import("Compilation.zig");
@@ -291,7 +291,7 @@ pub const Decl = struct {
generation: u32,
/// The AST node index of this declaration.
/// Must be recomputed when the corresponding source file is modified.
- src_node: ast.Node.Index,
+ src_node: Ast.Node.Index,
/// Line number corresponding to `src_node`. Stored separately so that source files
/// do not need to be loaded into memory in order to compute debug line numbers.
src_line: u32,
@@ -365,6 +365,8 @@ pub const Decl = struct {
/// Decl is marked alive, then it sends the Decl to the linker. Otherwise it
/// deletes the Decl on the spot.
alive: bool,
+ /// Whether the Decl is a `usingnamespace` declaration.
+ is_usingnamespace: bool,
/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
@@ -497,19 +499,19 @@ pub const Decl = struct {
return decl.src_line + offset;
}
- pub fn relativeToNodeIndex(decl: Decl, offset: i32) ast.Node.Index {
- return @bitCast(ast.Node.Index, offset + @bitCast(i32, decl.src_node));
+ pub fn relativeToNodeIndex(decl: Decl, offset: i32) Ast.Node.Index {
+ return @bitCast(Ast.Node.Index, offset + @bitCast(i32, decl.src_node));
}
- pub fn nodeIndexToRelative(decl: Decl, node_index: ast.Node.Index) i32 {
+ pub fn nodeIndexToRelative(decl: Decl, node_index: Ast.Node.Index) i32 {
return @bitCast(i32, node_index) - @bitCast(i32, decl.src_node);
}
- pub fn tokSrcLoc(decl: Decl, token_index: ast.TokenIndex) LazySrcLoc {
+ pub fn tokSrcLoc(decl: Decl, token_index: Ast.TokenIndex) LazySrcLoc {
return .{ .token_offset = token_index - decl.srcToken() };
}
- pub fn nodeSrcLoc(decl: Decl, node_index: ast.Node.Index) LazySrcLoc {
+ pub fn nodeSrcLoc(decl: Decl, node_index: Ast.Node.Index) LazySrcLoc {
return .{ .node_offset = decl.nodeIndexToRelative(node_index) };
}
@@ -525,7 +527,7 @@ pub const Decl = struct {
};
}
- pub fn srcToken(decl: Decl) ast.TokenIndex {
+ pub fn srcToken(decl: Decl) Ast.TokenIndex {
const tree = &decl.namespace.file_scope.tree;
return tree.firstToken(decl.src_node);
}
@@ -1008,6 +1010,11 @@ pub const Scope = struct {
anon_decls: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
+ /// Key is the usingnamespace Decl itself. To find the namespace being included,
+ /// the Decl's Value has to be resolved as a Type which has a Namespace.
+ /// Value is whether the usingnamespace decl is marked `pub`.
+ usingnamespace_set: std.AutoHashMapUnmanaged(*Decl, bool) = .{},
+
pub fn deinit(ns: *Namespace, mod: *Module) void {
ns.destroyDecls(mod);
ns.* = undefined;
@@ -1114,7 +1121,7 @@ pub const Scope = struct {
/// Whether this is populated depends on `status`.
stat_mtime: i128,
/// Whether this is populated or not depends on `tree_loaded`.
- tree: ast.Tree,
+ tree: Ast,
/// Whether this is populated or not depends on `zir_loaded`.
zir: Zir,
/// Package that this file is a part of, managed externally.
@@ -1213,7 +1220,7 @@ pub const Scope = struct {
return source;
}
- pub fn getTree(file: *File, gpa: *Allocator) !*const ast.Tree {
+ pub fn getTree(file: *File, gpa: *Allocator) !*const Ast {
if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa);
@@ -1558,17 +1565,17 @@ pub const ErrorMsg = struct {
pub const SrcLoc = struct {
file_scope: *Scope.File,
/// Might be 0 depending on tag of `lazy`.
- parent_decl_node: ast.Node.Index,
+ parent_decl_node: Ast.Node.Index,
/// Relative to `parent_decl_node`.
lazy: LazySrcLoc,
- pub fn declSrcToken(src_loc: SrcLoc) ast.TokenIndex {
+ pub fn declSrcToken(src_loc: SrcLoc) Ast.TokenIndex {
const tree = src_loc.file_scope.tree;
return tree.firstToken(src_loc.parent_decl_node);
}
- pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) ast.TokenIndex {
- return @bitCast(ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
+ pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex {
+ return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
}
pub fn byteOffset(src_loc: SrcLoc, gpa: *Allocator) !u32 {
@@ -1694,7 +1701,7 @@ pub const SrcLoc = struct {
const tree = try src_loc.file_scope.getTree(gpa);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
const full = switch (node_tags[node]) {
.call_one,
.call_one_comma,
@@ -1824,7 +1831,7 @@ pub const SrcLoc = struct {
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
- const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
+ const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
const case_nodes = tree.extra_data[extra.start..extra.end];
for (case_nodes) |case_node| {
const case = switch (node_tags[case_node]) {
@@ -1850,7 +1857,7 @@ pub const SrcLoc = struct {
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
- const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
+ const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
const case_nodes = tree.extra_data[extra.start..extra.end];
for (case_nodes) |case_node| {
const case = switch (node_tags[case_node]) {
@@ -1879,7 +1886,7 @@ pub const SrcLoc = struct {
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
const full = switch (node_tags[node]) {
.fn_proto_simple => tree.fnProtoSimple(&params, node),
.fn_proto_multi => tree.fnProtoMulti(node),
@@ -1904,7 +1911,7 @@ pub const SrcLoc = struct {
const tree = try src_loc.file_scope.getTree(gpa);
const node_tags = tree.nodes.items(.tag);
const node = src_loc.declRelativeToNodeIndex(node_off);
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
const full = switch (node_tags[node]) {
.fn_proto_simple => tree.fnProtoSimple(&params, node),
.fn_proto_multi => tree.fnProtoMulti(node),
@@ -1934,7 +1941,7 @@ pub const SrcLoc = struct {
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
const parent_node = src_loc.declRelativeToNodeIndex(node_off);
- var params: [1]ast.Node.Index = undefined;
+ var params: [1]Ast.Node.Index = undefined;
const full = switch (node_tags[parent_node]) {
.fn_proto_simple => tree.fnProtoSimple(&params, parent_node),
.fn_proto_multi => tree.fnProtoMulti(parent_node),
@@ -2648,7 +2655,10 @@ pub fn astGenFile(mod: *Module, file: *Scope.File) !void {
undefined;
defer if (data_has_safety_tag) gpa.free(safety_buffer);
const data_ptr = if (data_has_safety_tag)
- @ptrCast([*]const u8, safety_buffer.ptr)
+ if (file.zir.instructions.len == 0)
+ @as([*]const u8, undefined)
+ else
+ @ptrCast([*]const u8, safety_buffer.ptr)
else
@ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
if (data_has_safety_tag) {
@@ -3171,6 +3181,31 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
errdefer decl_arena.deinit();
const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+ if (decl.is_usingnamespace) {
+ const ty_ty = Type.initTag(.type);
+ if (!decl_tv.ty.eql(ty_ty)) {
+ return mod.fail(&block_scope.base, src, "expected type, found {}", .{decl_tv.ty});
+ }
+ var buffer: Value.ToTypeBuffer = undefined;
+ const ty = decl_tv.val.toType(&buffer);
+ if (ty.getNamespace() == null) {
+ return mod.fail(&block_scope.base, src, "type {} has no namespace", .{ty});
+ }
+
+ decl.ty = ty_ty;
+ decl.val = try Value.Tag.ty.create(&decl_arena.allocator, ty);
+ decl.align_val = Value.initTag(.null_value);
+ decl.linksection_val = Value.initTag(.null_value);
+ decl.has_tv = true;
+ decl.owns_tv = false;
+ decl_arena_state.* = decl_arena.state;
+ decl.value_arena = decl_arena_state;
+ decl.analysis = .complete;
+ decl.generation = mod.generation;
+
+ return true;
+ }
+
if (decl_tv.val.castTag(.function)) |fn_payload| {
const func = fn_payload.data;
const owns_tv = func.owner_decl == decl;
@@ -3266,16 +3301,13 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (type_changed and mod.emit_h != null) {
try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl });
}
- } else if (decl_tv.ty.zigTypeTag() == .Type) {
- // In case this Decl is a struct or union, we need to resolve the fields
- // while we still have the `Sema` in scope, so that the field type expressions
- // can use the resolved AIR instructions that they possibly reference.
- // We do this after the decl is populated and set to `complete` so that a `Decl`
- // may reference itself.
- var buffer: Value.ToTypeBuffer = undefined;
- const ty = decl.val.toType(&buffer);
- try sema.resolveDeclFields(&block_scope, src, ty);
}
+ // In case this Decl is a struct or union, we need to resolve the fields
+ // while we still have the `Sema` in scope, so that the field type expressions
+ // can use the resolved AIR instructions that they possibly reference.
+ // We do this after the decl is populated and set to `complete` so that a `Decl`
+ // may reference itself.
+ try sema.resolvePendingTypes(&block_scope);
if (decl.is_exported) {
const export_src = src; // TODO point to the export token
@@ -3491,7 +3523,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
// zig fmt: off
const is_pub = (flags & 0b0001) != 0;
- const is_exported = (flags & 0b0010) != 0;
+ const export_bit = (flags & 0b0010) != 0;
const has_align = (flags & 0b0100) != 0;
const has_linksection = (flags & 0b1000) != 0;
// zig fmt: on
@@ -3506,7 +3538,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
var is_named_test = false;
const decl_name: [:0]const u8 = switch (decl_name_index) {
0 => name: {
- if (is_exported) {
+ if (export_bit) {
const i = iter.usingnamespace_index;
iter.usingnamespace_index += 1;
break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i});
@@ -3532,11 +3564,17 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
}
},
};
+ const is_exported = export_bit and decl_name_index != 0;
+ const is_usingnamespace = export_bit and decl_name_index == 0;
+ if (is_usingnamespace) try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1);
// We create a Decl for it regardless of analysis status.
const gop = try namespace.decls.getOrPut(gpa, decl_name);
if (!gop.found_existing) {
const new_decl = try mod.allocateNewDecl(namespace, decl_node);
+ if (is_usingnamespace) {
+ namespace.usingnamespace_set.putAssumeCapacity(new_decl, is_pub);
+ }
log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace });
new_decl.src_line = line;
new_decl.name = decl_name;
@@ -3545,7 +3583,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
// test decls if in test mode, get analyzed.
const decl_pkg = namespace.file_scope.pkg;
const want_analysis = is_exported or switch (decl_name_index) {
- 0 => true, // comptime decl
+ 0 => true, // comptime or usingnamespace decl
1 => blk: {
// test decl with no name. Skip the part where we check against
// the test name filter.
@@ -3568,6 +3606,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
}
new_decl.is_pub = is_pub;
new_decl.is_exported = is_exported;
+ new_decl.is_usingnamespace = is_usingnamespace;
new_decl.has_align = has_align;
new_decl.has_linksection = has_linksection;
new_decl.zir_decl_index = @intCast(u32, decl_sub_index);
@@ -3584,6 +3623,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
decl.is_pub = is_pub;
decl.is_exported = is_exported;
+ decl.is_usingnamespace = is_usingnamespace;
decl.has_align = has_align;
decl.has_linksection = has_linksection;
decl.zir_decl_index = @intCast(u32, decl_sub_index);
@@ -3927,7 +3967,7 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
decl.analysis = .outdated;
}
-pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: ast.Node.Index) !*Decl {
+pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast.Node.Index) !*Decl {
// If we have emit-h then we must allocate a bigger structure to store the emit-h state.
const new_decl: *Decl = if (mod.emit_h != null) blk: {
const parent_struct = try mod.gpa.create(DeclPlusEmitH);
@@ -3976,6 +4016,7 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: ast.
.has_linksection = false,
.has_align = false,
.alive = false,
+ .is_usingnamespace = false,
};
return new_decl;
}
@@ -4196,7 +4237,7 @@ pub fn fail(
pub fn failTok(
mod: *Module,
scope: *Scope,
- token_index: ast.TokenIndex,
+ token_index: Ast.TokenIndex,
comptime format: []const u8,
args: anytype,
) CompileError {
@@ -4209,7 +4250,7 @@ pub fn failTok(
pub fn failNode(
mod: *Module,
scope: *Scope,
- node_index: ast.Node.Index,
+ node_index: Ast.Node.Index,
comptime format: []const u8,
args: anytype,
) CompileError {
@@ -4414,7 +4455,7 @@ pub const SwitchProngSrc = union(enum) {
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
- const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
+ const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
const case_nodes = tree.extra_data[extra.start..extra.end];
var multi_i: u32 = 0;
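
The Module changes above give each namespace a `usingnamespace_set` and mark the corresponding Decls with `is_usingnamespace`; Sema consults that set for `a.b` and `@hasDecl` lookups (see `lookupInNamespace` below). An illustrative sketch with hypothetical file names:

    // math.zig (hypothetical)
    pub fn add(a: i32, b: i32) i32 {
        return a + b;
    }

    // root.zig (hypothetical)
    const std = @import("std");
    pub usingnamespace @import("math.zig");

    test "decl reached through usingnamespace" {
        try std.testing.expect(@hasDecl(@This(), "add"));
        try std.testing.expectEqual(@as(i32, 3), @This().add(1, 2));
    }
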
diff --git a/src/Sema.zig b/src/Sema.zig
index a11bdec66d..46c1f1122e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -58,6 +58,9 @@ comptime_args_fn_inst: Zir.Inst.Index = 0,
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
preallocated_new_func: ?*Module.Fn = null,
+/// Collects struct, union, enum, and opaque decls which need to have their
+/// fields resolved before this Sema is deinitialized.
+types_pending_resolution: std.ArrayListUnmanaged(Type) = .{},
const std = @import("std");
const mem = std.mem;
@@ -90,6 +93,7 @@ pub fn deinit(sema: *Sema) void {
sema.air_values.deinit(gpa);
sema.inst_map.deinit(gpa);
sema.decl_val_table.deinit(gpa);
+ sema.types_pending_resolution.deinit(gpa);
sema.* = undefined;
}
@@ -570,6 +574,10 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
.c_define => return sema.zirCDefine( block, extended),
.wasm_memory_size => return sema.zirWasmMemorySize( block, extended),
.wasm_memory_grow => return sema.zirWasmMemoryGrow( block, extended),
+ .add_with_saturation => return sema.zirSatArithmetic( block, extended),
+ .sub_with_saturation => return sema.zirSatArithmetic( block, extended),
+ .mul_with_saturation => return sema.zirSatArithmetic( block, extended),
+ .shl_with_saturation => return sema.zirSatArithmetic( block, extended),
// zig fmt: on
}
}
@@ -904,7 +912,9 @@ fn zirStructDecl(
&struct_obj.namespace, new_decl, new_decl.name,
});
try sema.analyzeStructDecl(new_decl, inst, struct_obj);
+ try sema.types_pending_resolution.ensureUnusedCapacity(sema.gpa, 1);
try new_decl.finalizeNewArena(&new_decl_arena);
+ sema.types_pending_resolution.appendAssumeCapacity(struct_ty);
return sema.analyzeDeclVal(block, src, new_decl);
}
@@ -1194,7 +1204,9 @@ fn zirUnionDecl(
_ = try sema.mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl);
+ try sema.types_pending_resolution.ensureUnusedCapacity(sema.gpa, 1);
try new_decl.finalizeNewArena(&new_decl_arena);
+ sema.types_pending_resolution.appendAssumeCapacity(union_ty);
return sema.analyzeDeclVal(block, src, new_decl);
}
@@ -2320,42 +2332,105 @@ fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
}
fn lookupIdentifier(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, name: []const u8) !*Decl {
- // TODO emit a compile error if more than one decl would be matched.
var namespace = sema.namespace;
while (true) {
- if (try sema.lookupInNamespace(namespace, name)) |decl| {
+ if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl| {
return decl;
}
namespace = namespace.parent orelse break;
}
- return sema.mod.fail(&block.base, src, "use of undeclared identifier '{s}'", .{name});
+ unreachable; // Use of an undeclared identifier is caught during AstGen.
}
/// This looks up a member of a specific namespace. It is affected by `usingnamespace` but
/// only for ones in the specified namespace.
fn lookupInNamespace(
sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
namespace: *Scope.Namespace,
ident_name: []const u8,
+ observe_usingnamespace: bool,
) CompileError!?*Decl {
+ const mod = sema.mod;
+
const namespace_decl = namespace.getDecl();
if (namespace_decl.analysis == .file_failure) {
- try sema.mod.declareDeclDependency(sema.owner_decl, namespace_decl);
+ try mod.declareDeclDependency(sema.owner_decl, namespace_decl);
return error.AnalysisFail;
}
- // TODO implement usingnamespace
- if (namespace.decls.get(ident_name)) |decl| {
- try sema.mod.declareDeclDependency(sema.owner_decl, decl);
+ if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) {
+ const src_file = block.src_decl.namespace.file_scope;
+
+ const gpa = sema.gpa;
+ var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Scope.Namespace, void) = .{};
+ defer checked_namespaces.deinit(gpa);
+
+ // Keep track of name conflicts for error notes.
+ var candidates: std.ArrayListUnmanaged(*Decl) = .{};
+ defer candidates.deinit(gpa);
+
+ try checked_namespaces.put(gpa, namespace, {});
+ var check_i: usize = 0;
+
+ while (check_i < checked_namespaces.count()) : (check_i += 1) {
+ const check_ns = checked_namespaces.keys()[check_i];
+ if (check_ns.decls.get(ident_name)) |decl| {
+                // Skip decls which are not marked pub and are in a different
+                // file than the one containing the `a.b`/`@hasDecl` syntax.
+ if (decl.is_pub or src_file == decl.namespace.file_scope) {
+ try candidates.append(gpa, decl);
+ }
+ }
+ var it = check_ns.usingnamespace_set.iterator();
+ while (it.next()) |entry| {
+ const sub_usingnamespace_decl = entry.key_ptr.*;
+ const sub_is_pub = entry.value_ptr.*;
+ if (!sub_is_pub and src_file != sub_usingnamespace_decl.namespace.file_scope) {
+                    // Skip usingnamespace decls which are not marked pub and are in
+                    // a different file than the one containing the `a.b`/`@hasDecl` syntax.
+ continue;
+ }
+ try sema.ensureDeclAnalyzed(sub_usingnamespace_decl);
+ const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data;
+ const sub_ns = ns_ty.getNamespace().?;
+ try checked_namespaces.put(gpa, sub_ns, {});
+ }
+ }
+
+ switch (candidates.items.len) {
+ 0 => {},
+ 1 => {
+ const decl = candidates.items[0];
+ try mod.declareDeclDependency(sema.owner_decl, decl);
+ return decl;
+ },
+ else => {
+ const msg = msg: {
+ const msg = try mod.errMsg(&block.base, src, "ambiguous reference", .{});
+ errdefer msg.destroy(gpa);
+ for (candidates.items) |candidate| {
+ const src_loc = candidate.srcLoc();
+ try mod.errNoteNonLazy(src_loc, msg, "declared here", .{});
+ }
+ break :msg msg;
+ };
+ return mod.failWithOwnedErrorMsg(&block.base, msg);
+ },
+ }
+ } else if (namespace.decls.get(ident_name)) |decl| {
+ try mod.declareDeclDependency(sema.owner_decl, decl);
return decl;
}
+
log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{
sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name,
});
// TODO This dependency is too strong. Really, it should only be a dependency
// on the non-existence of `ident_name` in the namespace. We can lessen the number of
// outdated declarations by making this dependency more sophisticated.
- try sema.mod.declareDeclDependency(sema.owner_decl, namespace_decl);
+ try mod.declareDeclDependency(sema.owner_decl, namespace_decl);
return null;
}
@@ -2723,10 +2798,7 @@ fn analyzeCall(
// we need to resolve the field type expressions right here, right now, while
// the child `Sema` is still available, with the AIR instruction map intact,
// because the field type expressions may reference into it.
- if (sema.typeOf(result).zigTypeTag() == .Type) {
- const ty = try sema.analyzeAsType(&child_block, call_src, result);
- try sema.resolveDeclFields(&child_block, call_src, ty);
- }
+ try sema.resolvePendingTypes(&child_block);
}
break :res2 result;
@@ -5328,6 +5400,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const container_type = try sema.resolveType(block, lhs_src, extra.lhs);
@@ -5340,7 +5413,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
"expected struct, enum, union, or opaque, found '{}'",
.{container_type},
);
- if (try sema.lookupInNamespace(namespace, decl_name)) |decl| {
+ if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| {
if (decl.is_pub or decl.namespace.file_scope == block.base.namespace().file_scope) {
return Air.Inst.Ref.bool_true;
}
@@ -5512,16 +5585,134 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
- _ = inst;
- return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const lhs = sema.resolveInst(extra.lhs);
+ const rhs = sema.resolveInst(extra.rhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
+
+ const lhs_info = getArrayCatInfo(lhs_ty) orelse
+ return sema.mod.fail(&block.base, lhs_src, "expected array, found '{}'", .{lhs_ty});
+ const rhs_info = getArrayCatInfo(rhs_ty) orelse
+ return sema.mod.fail(&block.base, rhs_src, "expected array, found '{}'", .{rhs_ty});
+ if (!lhs_info.elem_type.eql(rhs_info.elem_type)) {
+ return sema.mod.fail(&block.base, rhs_src, "expected array of type '{}', found '{}'", .{ lhs_info.elem_type, rhs_ty });
+ }
+
+ // When there is a sentinel mismatch, no sentinel on the result. The type system
+ // will catch this if it is a problem.
+ var res_sent: ?Value = null;
+ if (rhs_info.sentinel != null and lhs_info.sentinel != null) {
+ if (rhs_info.sentinel.?.eql(lhs_info.sentinel.?, lhs_info.elem_type)) {
+ res_sent = lhs_info.sentinel.?;
+ }
+ }
+
+ if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
+ if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| {
+ const final_len = lhs_info.len + rhs_info.len;
+ if (lhs_ty.zigTypeTag() == .Pointer) {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ const lhs_sub_val = (try lhs_val.pointerDeref(anon_decl.arena())).?;
+ const rhs_sub_val = (try rhs_val.pointerDeref(anon_decl.arena())).?;
+ const buf = try anon_decl.arena().alloc(Value, final_len);
+ {
+ var i: u64 = 0;
+ while (i < lhs_info.len) : (i += 1) {
+ const val = try lhs_sub_val.elemValue(sema.arena, i);
+ buf[i] = try val.copy(anon_decl.arena());
+ }
+ }
+ {
+ var i: u64 = 0;
+ while (i < rhs_info.len) : (i += 1) {
+ const val = try rhs_sub_val.elemValue(sema.arena, i);
+ buf[lhs_info.len + i] = try val.copy(anon_decl.arena());
+ }
+ }
+ const ty = if (res_sent) |rs|
+ try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type, .sentinel = rs })
+ else
+ try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type });
+ const val = try Value.Tag.array.create(anon_decl.arena(), buf);
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ ty,
+ val,
+ ));
+ }
+ return sema.mod.fail(&block.base, lhs_src, "TODO array_cat more types of Values", .{});
+ } else {
+ return sema.mod.fail(&block.base, lhs_src, "TODO runtime array_cat", .{});
+ }
+ } else {
+ return sema.mod.fail(&block.base, lhs_src, "TODO runtime array_cat", .{});
+ }
+}
+
+fn getArrayCatInfo(t: Type) ?Type.ArrayInfo {
+ return switch (t.zigTypeTag()) {
+ .Array => t.arrayInfo(),
+ .Pointer => blk: {
+ const ptrinfo = t.ptrInfo().data;
+ if (ptrinfo.pointee_type.zigTypeTag() != .Array) return null;
+ if (ptrinfo.size != .One) return null;
+ break :blk ptrinfo.pointee_type.arrayInfo();
+ },
+ else => null,
+ };
}
fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
- _ = inst;
- return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayMul", .{});
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const lhs = sema.resolveInst(extra.lhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
+
+    // In `**`, the rhs has to be comptime-known, but the lhs can be runtime-known.
+ const tomulby = try sema.resolveInt(block, rhs_src, extra.rhs, Type.initTag(.usize));
+ const mulinfo = getArrayCatInfo(lhs_ty) orelse
+ return sema.mod.fail(&block.base, lhs_src, "expected array, found '{}'", .{lhs_ty});
+
+ const final_len = std.math.mul(u64, mulinfo.len, tomulby) catch return sema.mod.fail(&block.base, rhs_src, "operation results in overflow", .{});
+ if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
+ if (lhs_ty.zigTypeTag() == .Pointer) {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ const lhs_sub_val = (try lhs_val.pointerDeref(anon_decl.arena())).?;
+
+ const final_ty = if (mulinfo.sentinel) |sent|
+ try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type, .sentinel = sent })
+ else
+ try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type });
+
+ const buf = try anon_decl.arena().alloc(Value, final_len);
+ var i: u64 = 0;
+ while (i < tomulby) : (i += 1) {
+ var j: u64 = 0;
+ while (j < mulinfo.len) : (j += 1) {
+ const val = try lhs_sub_val.elemValue(sema.arena, j);
+ buf[mulinfo.len * i + j] = try val.copy(anon_decl.arena());
+ }
+ }
+ const val = try Value.Tag.array.create(anon_decl.arena(), buf);
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ final_ty,
+ val,
+ ));
+ }
+ return sema.mod.fail(&block.base, lhs_src, "TODO array_mul more types of Values", .{});
+ }
+ return sema.mod.fail(&block.base, lhs_src, "TODO runtime array_mul", .{});
}
fn zirNegate(
@@ -5573,6 +5764,19 @@ fn zirOverflowArithmetic(
return sema.mod.fail(&block.base, src, "TODO implement Sema.zirOverflowArithmetic", .{});
}
+fn zirSatArithmetic(
+ sema: *Sema,
+ block: *Scope.Block,
+ extended: Zir.Inst.Extended.InstData,
+) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const extra = sema.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data;
+ const src: LazySrcLoc = .{ .node_offset = extra.node };
+ return sema.mod.fail(&block.base, src, "TODO implement Sema.zirSatArithmetic", .{});
+}
+
fn analyzeArithmetic(
sema: *Sema,
block: *Scope.Block,
@@ -5701,7 +5905,7 @@ fn analyzeArithmetic(
try lhs_val.floatMul(rhs_val, scalar_type, sema.arena);
break :blk val;
},
- else => return sema.mod.fail(&block.base, src, "TODO Implement arithmetic operand '{s}'", .{@tagName(zir_tag)}),
+ else => return sema.mod.fail(&block.base, src, "TODO implement comptime arithmetic for operand '{s}'", .{@tagName(zir_tag)}),
};
log.debug("{s}({}, {}) result: {}", .{ @tagName(zir_tag), lhs_val, rhs_val, value });
@@ -5714,6 +5918,14 @@ fn analyzeArithmetic(
try sema.requireRuntimeBlock(block, lhs_src);
}
+ if (zir_tag == .mod_rem) {
+ const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isFloat();
+ const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isFloat();
+ if (dirty_lhs or dirty_rhs) {
+ return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
+ }
+ }
+
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add => .add,
.addwrap => .addwrap,
@@ -5722,7 +5934,9 @@ fn analyzeArithmetic(
.mul => .mul,
.mulwrap => .mulwrap,
.div => .div,
- else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for operand '{s}''", .{@tagName(zir_tag)}),
+ .mod_rem => .rem,
+ .rem => .rem,
+ else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for operand '{s}'", .{@tagName(zir_tag)}),
};
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
@@ -7184,9 +7398,7 @@ fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A
}
fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{});
+ return sema.zirArithmetic(block, inst);
}
fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8060,7 +8272,7 @@ fn namespaceLookup(
) CompileError!?*Decl {
const mod = sema.mod;
const gpa = sema.gpa;
- if (try sema.lookupInNamespace(namespace, decl_name)) |decl| {
+ if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| {
if (!decl.is_pub and decl.namespace.file_scope != block.getFileScope()) {
const msg = msg: {
const msg = try mod.errMsg(&block.base, src, "'{s}' is not marked 'pub'", .{
@@ -8776,8 +8988,7 @@ fn analyzeDeclVal(
return result;
}
-fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
- try sema.mod.declareDeclDependency(sema.owner_decl, decl);
+fn ensureDeclAnalyzed(sema: *Sema, decl: *Decl) CompileError!void {
sema.mod.ensureDeclAnalyzed(decl) catch |err| {
if (sema.owner_func) |owner_func| {
owner_func.state = .dependency_failure;
@@ -8786,6 +8997,11 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
}
return err;
};
+}
+
+fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
+ try sema.mod.declareDeclDependency(sema.owner_decl, decl);
+ try sema.ensureDeclAnalyzed(decl);
const decl_tv = try decl.typedValue();
if (decl_tv.val.castTag(.variable)) |payload| {
@@ -8818,9 +9034,12 @@ fn analyzeRef(
try sema.requireRuntimeBlock(block, src);
const ptr_type = try Module.simplePtrType(sema.arena, operand_ty, false, .One);
- const alloc = try block.addTy(.alloc, ptr_type);
+ const mut_ptr_type = try Module.simplePtrType(sema.arena, operand_ty, true, .One);
+ const alloc = try block.addTy(.alloc, mut_ptr_type);
try sema.storePtr(block, src, alloc, operand);
- return alloc;
+
+ // TODO: Replace with sema.coerce when that supports adding pointer constness.
+ return sema.bitcast(block, ptr_type, alloc, src);
}
fn analyzeLoad(
@@ -9414,6 +9633,16 @@ pub fn resolveTypeLayout(
}
}
+pub fn resolvePendingTypes(sema: *Sema, block: *Scope.Block) !void {
+ for (sema.types_pending_resolution.items) |ty| {
+ // If an error happens resolving the fields of a struct, it will be marked
+ // invalid and a proper compile error set up. But we should still look at the
+ // other types pending resolution.
+ const src: LazySrcLoc = .{ .node_offset = 0 };
+ sema.resolveDeclFields(block, src, ty) catch continue;
+ }
+}
+
/// `sema` and `block` are expected to be the same ones used for the `Decl`.
pub fn resolveDeclFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) !void {
switch (ty.tag()) {
@@ -9948,7 +10177,7 @@ fn typeHasOnePossibleValue(
};
}
-fn getAstTree(sema: *Sema, block: *Scope.Block) CompileError!*const std.zig.ast.Tree {
+fn getAstTree(sema: *Sema, block: *Scope.Block) CompileError!*const std.zig.Ast {
return block.src_decl.namespace.file_scope.getTree(sema.gpa) catch |err| {
log.err("unable to load AST to report compile error: {s}", .{@errorName(err)});
return error.AnalysisFail;
@@ -9957,14 +10186,14 @@ fn getAstTree(sema: *Sema, block: *Scope.Block) CompileError!*const std.zig.ast.
fn enumFieldSrcLoc(
decl: *Decl,
- tree: std.zig.ast.Tree,
+ tree: std.zig.Ast,
node_offset: i32,
field_index: usize,
) LazySrcLoc {
@setCold(true);
const enum_node = decl.relativeToNodeIndex(node_offset);
const node_tags = tree.nodes.items(.tag);
- var buffer: [2]std.zig.ast.Node.Index = undefined;
+ var buffer: [2]std.zig.Ast.Node.Index = undefined;
const container_decl = switch (node_tags[enum_node]) {
.container_decl,
.container_decl_trailing,
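
Note: the zirArrayCat and zirArrayMul changes above implement comptime evaluation of the `++` and `**` operators. A minimal usage sketch of the behavior they compute (illustrative only, not part of this diff):

    const std = @import("std");

    test "comptime array concatenation and repetition" {
        // `++` concatenates two comptime-known arrays; `**` repeats one.
        const cat = [_]u8{ 1, 2 } ++ [_]u8{3};
        const rep = [_]u8{ 0, 1 } ** 2;
        try std.testing.expectEqualSlices(u8, &[_]u8{ 1, 2, 3 }, &cat);
        try std.testing.expectEqualSlices(u8, &[_]u8{ 0, 1, 0, 1 }, &rep);
    }
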
diff --git a/src/Zir.zig b/src/Zir.zig
index 2110122580..a3c0d5d3a8 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -16,7 +16,7 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
-const ast = std.zig.ast;
+const Ast = std.zig.Ast;
const Zir = @This();
const Type = @import("type.zig").Type;
@@ -495,12 +495,15 @@ pub const Inst = struct {
/// Uses the `ptr_type` union field.
ptr_type,
/// Slice operation `lhs[rhs..]`. No sentinel and no end offset.
+ /// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceStart`.
slice_start,
/// Slice operation `array_ptr[start..end]`. No sentinel.
+ /// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceEnd`.
slice_end,
/// Slice operation `array_ptr[start..end:sentinel]`.
+ /// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceSentinel`.
slice_sentinel,
/// Write a value to a pointer. For loading, see `load`.
@@ -1626,6 +1629,22 @@ pub const Inst = struct {
wasm_memory_size,
/// `operand` is payload index to `BinNode`.
wasm_memory_grow,
+ /// Implements the `@addWithSaturation` builtin.
+ /// `operand` is payload index to `SaturatingArithmetic`.
+ /// `small` is unused.
+ add_with_saturation,
+ /// Implements the `@subWithSaturation` builtin.
+ /// `operand` is payload index to `SaturatingArithmetic`.
+ /// `small` is unused.
+ sub_with_saturation,
+ /// Implements the `@mulWithSaturation` builtin.
+ /// `operand` is payload index to `SaturatingArithmetic`.
+ /// `small` is unused.
+ mul_with_saturation,
+ /// Implements the `@shlWithSaturation` builtin.
+ /// `operand` is payload index to `SaturatingArithmetic`.
+ /// `small` is unused.
+ shl_with_saturation,
pub const InstData = struct {
opcode: Extended,
@@ -2073,7 +2092,7 @@ pub const Inst = struct {
/// Used for unary operators, with a token source location.
un_tok: struct {
/// Offset from Decl AST token index.
- src_tok: ast.TokenIndex,
+ src_tok: Ast.TokenIndex,
/// The meaning of this operand depends on the corresponding `Tag`.
operand: Ref,
@@ -2095,7 +2114,7 @@ pub const Inst = struct {
},
pl_tok: struct {
/// Offset from Decl AST token index.
- src_tok: ast.TokenIndex,
+ src_tok: Ast.TokenIndex,
/// index into extra.
/// `Tag` determines what lives there.
payload_index: u32,
@@ -2131,7 +2150,7 @@ pub const Inst = struct {
}
},
/// Offset from Decl AST token index.
- tok: ast.TokenIndex,
+ tok: Ast.TokenIndex,
/// Offset from Decl AST node index.
node: i32,
int: u64,
@@ -2748,6 +2767,12 @@ pub const Inst = struct {
ptr: Ref,
};
+ pub const SaturatingArithmetic = struct {
+ node: i32,
+ lhs: Ref,
+ rhs: Ref,
+ };
+
pub const Cmpxchg = struct {
ptr: Ref,
expected_value: Ref,
@@ -2853,9 +2878,9 @@ pub const Inst = struct {
pub const Item = struct {
/// null terminated string index
msg: u32,
- node: ast.Node.Index,
+ node: Ast.Node.Index,
/// If node is 0 then this will be populated.
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
/// Can be used in combination with `token`.
byte_offset: u32,
/// 0 or a payload index of a `Block`, each is a payload
@@ -2872,7 +2897,7 @@ pub const Inst = struct {
/// null terminated string index
name: u32,
/// points to the import name
- token: ast.TokenIndex,
+ token: Ast.TokenIndex,
};
};
};
@@ -2887,8 +2912,8 @@ const Writer = struct {
indent: u32,
parent_decl_node: u32,
- fn relativeToNodeIndex(self: *Writer, offset: i32) ast.Node.Index {
- return @bitCast(ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node));
+ fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index {
+ return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node));
}
fn writeInstToStream(
@@ -3228,6 +3253,11 @@ const Writer = struct {
.shl_with_overflow,
=> try self.writeOverflowArithmetic(stream, extended),
+ .add_with_saturation,
+ .sub_with_saturation,
+ .mul_with_saturation,
+ .shl_with_saturation,
+ => try self.writeSaturatingArithmetic(stream, extended),
.struct_decl => try self.writeStructDecl(stream, extended),
.union_decl => try self.writeUnionDecl(stream, extended),
.enum_decl => try self.writeEnumDecl(stream, extended),
@@ -3394,7 +3424,7 @@ const Writer = struct {
try self.writeBody(stream, body);
self.indent -= 2;
try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll(") ");
+ try stream.writeAll("}) ");
try self.writeSrc(stream, inst_data.src());
}
@@ -3581,6 +3611,18 @@ const Writer = struct {
try self.writeSrc(stream, src);
}
+ fn writeSaturatingArithmetic(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data;
+ const src: LazySrcLoc = .{ .node_offset = extra.node };
+
+ try self.writeInstRef(stream, extra.lhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.rhs);
+        try stream.writeAll(") ");
+ try self.writeSrc(stream, src);
+ }
+
fn writePlNodeCall(self: *Writer, stream: anytype, inst: Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Inst.Call, inst_data.payload_index);
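
Note: the new add_with_saturation/sub_with_saturation/mul_with_saturation/shl_with_saturation extended instructions above only carry the operands of the `@addWithSaturation`-family builtins; Sema still rejects them with a TODO. A sketch of the presumed semantics (saturating arithmetic clamps at the bounds of the result type; this helper is an assumption for illustration, not the compiler's implementation):

    const std = @import("std");

    // Hypothetical helper: clamp on overflow instead of wrapping.
    fn addSat(comptime T: type, a: T, b: T) T {
        const hi = std.math.maxInt(T);
        const lo = std.math.minInt(T);
        return std.math.add(T, a, b) catch if (b > 0) @as(T, hi) else @as(T, lo);
    }

    test "saturating add clamps at the bounds of the result type" {
        try std.testing.expectEqual(@as(u8, 255), addSat(u8, 250, 10));
        try std.testing.expectEqual(@as(i8, -128), addSat(i8, -120, -10));
    }
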
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index eb8dfc5753..524374a7e9 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -1,6 +1,12 @@
// This file is generated by tools/update_clang_options.zig.
// zig fmt: off
-usingnamespace @import("clang_options.zig");
+const clang_options = @import("clang_options.zig");
+const CliArg = clang_options.CliArg;
+const flagpd1 = clang_options.flagpd1;
+const flagpsl = clang_options.flagpsl;
+const joinpd1 = clang_options.joinpd1;
+const jspd1 = clang_options.jspd1;
+const sepd1 = clang_options.sepd1;
pub const data = blk: { @setEvalBranchQuota(6000); break :blk &[_]CliArg{
flagpd1("C"),
flagpd1("CC"),
diff --git a/src/codegen.zig b/src/codegen.zig
index a4f6f482b2..c60b6ee532 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -809,6 +809,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.div => try self.airDiv(inst),
+ .rem => try self.airRem(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@@ -898,7 +899,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn dbgSetPrologueEnd(self: *Self) InnerError!void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
- try dbg_out.dbg_line.append(DW.LNS_set_prologue_end);
+ try dbg_out.dbg_line.append(DW.LNS.set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
.none => {},
@@ -908,7 +909,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
- try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin);
+ try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
.none => {},
@@ -924,13 +925,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// It lets you emit single-byte opcodes that add different numbers to
// both the PC and the line number at the same time.
try dbg_out.dbg_line.ensureUnusedCapacity(11);
- dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.advance_pc);
leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable;
if (delta_line != 0) {
- dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.advance_line);
leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
}
- dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy);
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.copy);
},
.none => {},
}
@@ -1009,7 +1010,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
const index = dbg_out.dbg_info.items.len;
- try dbg_out.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
+ try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
if (!gop.found_existing) {
@@ -1266,6 +1267,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airRem(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement rem for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -2429,13 +2438,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.dwarf => |dbg_out| {
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3);
dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
- dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
+ dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
- try self.addDbgInfoTypeReloc(ty); // DW.AT_type, DW.FORM_ref4
- dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
+ try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
+ dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
.none => {},
}
@@ -2458,15 +2467,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
var counting_writer = std.io.countingWriter(std.io.null_writer);
leb128.writeILEB128(counting_writer.writer(), adjusted_stack_offset) catch unreachable;
- // DW.AT_location, DW.FORM_exprloc
+ // DW.AT.location, DW.FORM.exprloc
// ULEB128 dwarf expression length
try leb128.writeULEB128(dbg_out.dbg_info.writer(), counting_writer.bytes_written + 1);
- try dbg_out.dbg_info.append(DW.OP_breg11);
+ try dbg_out.dbg_info.append(DW.OP.breg11);
try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
- try self.addDbgInfoTypeReloc(ty); // DW.AT_type, DW.FORM_ref4
- dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
+ try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
+ dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
else => {},
}
@@ -5182,25 +5191,59 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return error.CodegenFail;
}
- usingnamespace switch (arch) {
- .i386 => @import("codegen/x86.zig"),
- .x86_64 => @import("codegen/x86_64.zig"),
- .riscv64 => @import("codegen/riscv64.zig"),
- .arm, .armeb => @import("codegen/arm.zig"),
- .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig"),
- else => struct {
- pub const Register = enum {
- dummy,
-
- pub fn allocIndex(self: Register) ?u4 {
- _ = self;
- return null;
- }
- };
- pub const callee_preserved_regs = [_]Register{};
+ const Register = switch (arch) {
+ .i386 => @import("codegen/x86.zig").Register,
+ .x86_64 => @import("codegen/x86_64.zig").Register,
+ .riscv64 => @import("codegen/riscv64.zig").Register,
+ .arm, .armeb => @import("codegen/arm.zig").Register,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Register,
+ else => enum {
+ dummy,
+
+ pub fn allocIndex(self: Register) ?u4 {
+ _ = self;
+ return null;
+ }
},
};
+ const Instruction = switch (arch) {
+ .riscv64 => @import("codegen/riscv64.zig").Instruction,
+ .arm, .armeb => @import("codegen/arm.zig").Instruction,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Instruction,
+ else => void,
+ };
+
+ const Condition = switch (arch) {
+ .arm, .armeb => @import("codegen/arm.zig").Condition,
+ else => void,
+ };
+
+ const callee_preserved_regs = switch (arch) {
+ .i386 => @import("codegen/x86.zig").callee_preserved_regs,
+ .x86_64 => @import("codegen/x86_64.zig").callee_preserved_regs,
+ .riscv64 => @import("codegen/riscv64.zig").callee_preserved_regs,
+ .arm, .armeb => @import("codegen/arm.zig").callee_preserved_regs,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").callee_preserved_regs,
+ else => [_]Register{},
+ };
+
+ const c_abi_int_param_regs = switch (arch) {
+ .i386 => @import("codegen/x86.zig").c_abi_int_param_regs,
+ .x86_64 => @import("codegen/x86_64.zig").c_abi_int_param_regs,
+ .arm, .armeb => @import("codegen/arm.zig").c_abi_int_param_regs,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_param_regs,
+ else => [_]Register{},
+ };
+
+ const c_abi_int_return_regs = switch (arch) {
+ .i386 => @import("codegen/x86.zig").c_abi_int_return_regs,
+ .x86_64 => @import("codegen/x86_64.zig").c_abi_int_return_regs,
+ .arm, .armeb => @import("codegen/arm.zig").c_abi_int_return_regs,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_return_regs,
+ else => [_]Register{},
+ };
+
fn parseRegName(name: []const u8) ?Register {
if (@hasDecl(Register, "parseRegName")) {
return Register.parseRegName(name);
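
Note: the codegen.zig hunk above replaces `usingnamespace switch (arch)` with one explicit declaration per name. A reduced sketch of that pattern (illustrative only; the enum contents are made up), selecting a per-architecture declaration with a comptime switch:

    const std = @import("std");

    fn RegisterFor(comptime arch: std.Target.Cpu.Arch) type {
        return switch (arch) {
            .x86_64 => enum { rax, rbx, rcx },
            else => enum { dummy },
        };
    }

    test "per-architecture declarations via comptime switch" {
        try std.testing.expect(@hasField(RegisterFor(.x86_64), "rax"));
        try std.testing.expect(@hasField(RegisterFor(.riscv64), "dummy"));
    }
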
diff --git a/src/codegen/aarch64.zig b/src/codegen/aarch64.zig
index 1c176df017..dfda04da85 100644
--- a/src/codegen/aarch64.zig
+++ b/src/codegen/aarch64.zig
@@ -52,7 +52,7 @@ pub const Register = enum(u6) {
}
pub fn dwarfLocOp(self: Register) u8 {
- return @as(u8, self.id()) + DW.OP_reg0;
+ return @as(u8, self.id()) + DW.OP.reg0;
}
};
diff --git a/src/codegen/arm.zig b/src/codegen/arm.zig
index d30479e1f1..ec9152f96b 100644
--- a/src/codegen/arm.zig
+++ b/src/codegen/arm.zig
@@ -170,7 +170,7 @@ pub const Register = enum(u5) {
}
pub fn dwarfLocOp(self: Register) u8 {
- return @as(u8, self.id()) + DW.OP_reg0;
+ return @as(u8, self.id()) + DW.OP.reg0;
}
};
@@ -192,7 +192,7 @@ pub const c_abi_int_return_regs = [_]Register{ .r0, .r1 };
/// Represents an instruction in the ARM instruction set architecture
pub const Instruction = union(enum) {
- DataProcessing: packed struct {
+ data_processing: packed struct {
// Note to self: The order of the fields top-to-bottom is
// right-to-left in the actual 32-bit int representation
op2: u12,
@@ -204,7 +204,7 @@ pub const Instruction = union(enum) {
fixed: u2 = 0b00,
cond: u4,
},
- Multiply: packed struct {
+ multiply: packed struct {
rn: u4,
fixed_1: u4 = 0b1001,
rm: u4,
@@ -215,7 +215,7 @@ pub const Instruction = union(enum) {
fixed_2: u6 = 0b000000,
cond: u4,
},
- MultiplyLong: packed struct {
+ multiply_long: packed struct {
rn: u4,
fixed_1: u4 = 0b1001,
rm: u4,
@@ -227,7 +227,17 @@ pub const Instruction = union(enum) {
fixed_2: u5 = 0b00001,
cond: u4,
},
- SingleDataTransfer: packed struct {
+ integer_saturating_arithmetic: packed struct {
+ rm: u4,
+ fixed_1: u8 = 0b0000_0101,
+ rd: u4,
+ rn: u4,
+ fixed_2: u1 = 0b0,
+ opc: u2,
+ fixed_3: u5 = 0b00010,
+ cond: u4,
+ },
+ single_data_transfer: packed struct {
offset: u12,
rd: u4,
rn: u4,
@@ -240,7 +250,7 @@ pub const Instruction = union(enum) {
fixed: u2 = 0b01,
cond: u4,
},
- ExtraLoadStore: packed struct {
+ extra_load_store: packed struct {
imm4l: u4,
fixed_1: u1 = 0b1,
op2: u2,
@@ -256,7 +266,7 @@ pub const Instruction = union(enum) {
fixed_3: u3 = 0b000,
cond: u4,
},
- BlockDataTransfer: packed struct {
+ block_data_transfer: packed struct {
register_list: u16,
rn: u4,
load_store: u1,
@@ -267,25 +277,25 @@ pub const Instruction = union(enum) {
fixed: u3 = 0b100,
cond: u4,
},
- Branch: packed struct {
+ branch: packed struct {
offset: u24,
link: u1,
fixed: u3 = 0b101,
cond: u4,
},
- BranchExchange: packed struct {
+ branch_exchange: packed struct {
rn: u4,
fixed_1: u1 = 0b1,
link: u1,
fixed_2: u22 = 0b0001_0010_1111_1111_1111_00,
cond: u4,
},
- SupervisorCall: packed struct {
+ supervisor_call: packed struct {
comment: u24,
fixed: u4 = 0b1111,
cond: u4,
},
- Breakpoint: packed struct {
+ breakpoint: packed struct {
imm4: u4,
fixed_1: u4 = 0b0111,
imm12: u12,
@@ -293,7 +303,7 @@ pub const Instruction = union(enum) {
},
/// Represents the possible operations which can be performed by a
- /// DataProcessing instruction
+ /// Data Processing instruction
const Opcode = enum(u4) {
// Rd := Op1 AND Op2
@"and",
@@ -530,16 +540,17 @@ pub const Instruction = union(enum) {
pub fn toU32(self: Instruction) u32 {
return switch (self) {
- .DataProcessing => |v| @bitCast(u32, v),
- .Multiply => |v| @bitCast(u32, v),
- .MultiplyLong => |v| @bitCast(u32, v),
- .SingleDataTransfer => |v| @bitCast(u32, v),
- .ExtraLoadStore => |v| @bitCast(u32, v),
- .BlockDataTransfer => |v| @bitCast(u32, v),
- .Branch => |v| @bitCast(u32, v),
- .BranchExchange => |v| @bitCast(u32, v),
- .SupervisorCall => |v| @bitCast(u32, v),
- .Breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
+ .data_processing => |v| @bitCast(u32, v),
+ .multiply => |v| @bitCast(u32, v),
+ .multiply_long => |v| @bitCast(u32, v),
+ .integer_saturating_arithmetic => |v| @bitCast(u32, v),
+ .single_data_transfer => |v| @bitCast(u32, v),
+ .extra_load_store => |v| @bitCast(u32, v),
+ .block_data_transfer => |v| @bitCast(u32, v),
+ .branch => |v| @bitCast(u32, v),
+ .branch_exchange => |v| @bitCast(u32, v),
+ .supervisor_call => |v| @bitCast(u32, v),
+ .breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
};
}
@@ -554,7 +565,7 @@ pub const Instruction = union(enum) {
op2: Operand,
) Instruction {
return Instruction{
- .DataProcessing = .{
+ .data_processing = .{
.cond = @enumToInt(cond),
.i = @boolToInt(op2 == .Immediate),
.opcode = @enumToInt(opcode),
@@ -573,7 +584,7 @@ pub const Instruction = union(enum) {
top: bool,
) Instruction {
return Instruction{
- .DataProcessing = .{
+ .data_processing = .{
.cond = @enumToInt(cond),
.i = 1,
.opcode = if (top) 0b1010 else 0b1000,
@@ -594,7 +605,7 @@ pub const Instruction = union(enum) {
ra: ?Register,
) Instruction {
return Instruction{
- .Multiply = .{
+ .multiply = .{
.cond = @enumToInt(cond),
.accumulate = @boolToInt(ra != null),
.set_cond = set_cond,
@@ -617,7 +628,7 @@ pub const Instruction = union(enum) {
rn: Register,
) Instruction {
return Instruction{
- .MultiplyLong = .{
+ .multiply_long = .{
.cond = @enumToInt(cond),
.unsigned = signed,
.accumulate = accumulate,
@@ -630,6 +641,24 @@ pub const Instruction = union(enum) {
};
}
+ fn integerSaturationArithmetic(
+ cond: Condition,
+ rd: Register,
+ rm: Register,
+ rn: Register,
+ opc: u2,
+ ) Instruction {
+ return Instruction{
+ .integer_saturating_arithmetic = .{
+ .rm = rm.id(),
+ .rd = rd.id(),
+ .rn = rn.id(),
+ .opc = opc,
+ .cond = @enumToInt(cond),
+ },
+ };
+ }
+
fn singleDataTransfer(
cond: Condition,
rd: Register,
@@ -642,7 +671,7 @@ pub const Instruction = union(enum) {
load_store: u1,
) Instruction {
return Instruction{
- .SingleDataTransfer = .{
+ .single_data_transfer = .{
.cond = @enumToInt(cond),
.rn = rn.id(),
.rd = rd.id(),
@@ -678,7 +707,7 @@ pub const Instruction = union(enum) {
};
return Instruction{
- .ExtraLoadStore = .{
+ .extra_load_store = .{
.imm4l = imm4l,
.op2 = op2,
.imm4h = imm4h,
@@ -705,7 +734,7 @@ pub const Instruction = union(enum) {
load_store: u1,
) Instruction {
return Instruction{
- .BlockDataTransfer = .{
+ .block_data_transfer = .{
.register_list = @bitCast(u16, reg_list),
.rn = rn.id(),
.load_store = load_store,
@@ -720,7 +749,7 @@ pub const Instruction = union(enum) {
fn branch(cond: Condition, offset: i26, link: u1) Instruction {
return Instruction{
- .Branch = .{
+ .branch = .{
.cond = @enumToInt(cond),
.link = link,
.offset = @bitCast(u24, @intCast(i24, offset >> 2)),
@@ -730,7 +759,7 @@ pub const Instruction = union(enum) {
fn branchExchange(cond: Condition, rn: Register, link: u1) Instruction {
return Instruction{
- .BranchExchange = .{
+ .branch_exchange = .{
.cond = @enumToInt(cond),
.link = link,
.rn = rn.id(),
@@ -740,7 +769,7 @@ pub const Instruction = union(enum) {
fn supervisorCall(cond: Condition, comment: u24) Instruction {
return Instruction{
- .SupervisorCall = .{
+ .supervisor_call = .{
.cond = @enumToInt(cond),
.comment = comment,
},
@@ -749,7 +778,7 @@ pub const Instruction = union(enum) {
fn breakpoint(imm: u16) Instruction {
return Instruction{
- .Breakpoint = .{
+ .breakpoint = .{
.imm12 = @truncate(u12, imm >> 4),
.imm4 = @truncate(u4, imm),
},
@@ -857,11 +886,11 @@ pub const Instruction = union(enum) {
return dataProcessing(cond, .mov, 1, rd, .r0, op2);
}
- pub fn bic(cond: Condition, rd: Register, op2: Operand) Instruction {
+ pub fn bic(cond: Condition, rd: Register, rn: Register, op2: Operand) Instruction {
return dataProcessing(cond, .bic, 0, rd, rn, op2);
}
- pub fn bics(cond: Condition, rd: Register, op2: Operand) Instruction {
+ pub fn bics(cond: Condition, rd: Register, rn: Register, op2: Operand) Instruction {
return dataProcessing(cond, .bic, 1, rd, rn, op2);
}
@@ -873,6 +902,24 @@ pub const Instruction = union(enum) {
return dataProcessing(cond, .mvn, 1, rd, .r0, op2);
}
+ // Integer Saturating Arithmetic
+
+ pub fn qadd(cond: Condition, rd: Register, rm: Register, rn: Register) Instruction {
+ return integerSaturationArithmetic(cond, rd, rm, rn, 0b00);
+ }
+
+ pub fn qsub(cond: Condition, rd: Register, rm: Register, rn: Register) Instruction {
+ return integerSaturationArithmetic(cond, rd, rm, rn, 0b01);
+ }
+
+ pub fn qdadd(cond: Condition, rd: Register, rm: Register, rn: Register) Instruction {
+ return integerSaturationArithmetic(cond, rd, rm, rn, 0b10);
+ }
+
+ pub fn qdsub(cond: Condition, rd: Register, rm: Register, rn: Register) Instruction {
+ return integerSaturationArithmetic(cond, rd, rm, rn, 0b11);
+ }
+
// movw and movt
pub fn movw(cond: Condition, rd: Register, imm: u16) Instruction {
@@ -887,7 +934,7 @@ pub const Instruction = union(enum) {
pub fn mrs(cond: Condition, rd: Register, psr: Psr) Instruction {
return Instruction{
- .DataProcessing = .{
+ .data_processing = .{
.cond = @enumToInt(cond),
.i = 0,
.opcode = if (psr == .spsr) 0b1010 else 0b1000,
@@ -901,7 +948,7 @@ pub const Instruction = union(enum) {
pub fn msr(cond: Condition, psr: Psr, op: Operand) Instruction {
return Instruction{
- .DataProcessing = .{
+ .data_processing = .{
.cond = @enumToInt(cond),
.i = 0,
.opcode = if (psr == .spsr) 0b1011 else 0b1001,
@@ -1294,6 +1341,10 @@ test "serialize instructions" {
.inst = Instruction.ldmea(.al, .r4, true, .{ .r2 = true, .r5 = true }),
.expected = 0b1110_100_1_0_0_1_1_0100_0000000000100100,
},
+ .{ // qadd r0, r7, r8
+ .inst = Instruction.qadd(.al, .r0, .r7, .r8),
+ .expected = 0b1110_00010_00_0_1000_0000_0000_0101_0111,
+ },
};
for (testcases) |case| {
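
Note: the `Instruction` union above relies on packed struct fields being laid out starting from the least significant bit, which is why each variant lists its fields bottom-of-word first. A standalone sketch of that property (illustrative only, not part of the diff):

    const std = @import("std");

    const Example = packed struct {
        low_nibble: u4, // bits 0..3
        middle: u24, // bits 4..27
        cond: u4, // bits 28..31
    };

    test "packed struct fields start at the least significant bit" {
        const word = @bitCast(u32, Example{ .low_nibble = 0xF, .middle = 0, .cond = 0xE });
        try std.testing.expectEqual(@as(u32, 0xE000_000F), word);
    }
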
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 2084b1e1ce..fd964f2829 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -858,6 +858,7 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
.div => try airBinOp( o, inst, " / "),
+ .rem => try airBinOp( o, inst, " % "),
.cmp_eq => try airBinOp(o, inst, " == "),
.cmp_gt => try airBinOp(o, inst, " > "),
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index d7aa2d45b3..8b7fd5dc54 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -167,7 +167,7 @@ pub const Object = struct {
const context = llvm.Context.create();
errdefer context.dispose();
- initializeLLVMTargets();
+ initializeLLVMTarget(options.target.cpu.arch);
const root_nameZ = try gpa.dupeZ(u8, options.root_name);
defer gpa.free(root_nameZ);
@@ -256,14 +256,6 @@ pub const Object = struct {
gpa.destroy(self);
}
- fn initializeLLVMTargets() void {
- llvm.initializeAllTargets();
- llvm.initializeAllTargetInfos();
- llvm.initializeAllTargetMCs();
- llvm.initializeAllAsmPrinters();
- llvm.initializeAllAsmParsers();
- }
-
fn locPath(
arena: *Allocator,
opt_loc: ?Compilation.EmitLoc,
@@ -796,11 +788,13 @@ pub const DeclGen = struct {
const gpa = self.gpa;
const elem_ty = tv.ty.elemType();
const elem_vals = payload.data;
- const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
+ const sento = tv.ty.sentinel();
+ const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len + @boolToInt(sento != null));
defer gpa.free(llvm_elems);
for (elem_vals) |elem_val, i| {
llvm_elems[i] = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
}
+ if (sento) |sent| llvm_elems[elem_vals.len] = try self.genTypedValue(.{ .ty = elem_ty, .val = sent });
const llvm_elem_ty = try self.llvmType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@@ -985,6 +979,7 @@ pub const FuncGen = struct {
.mul => try self.airMul(inst, false),
.mulwrap => try self.airMul(inst, true),
.div => try self.airDiv(inst),
+ .rem => try self.airRem(inst),
.ptr_add => try self.airPtrAdd(inst),
.ptr_sub => try self.airPtrSub(inst),
@@ -1727,6 +1722,19 @@ pub const FuncGen = struct {
return self.builder.buildUDiv(lhs, rhs, "");
}
+ fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isFloat()) return self.builder.buildFRem(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
+ return self.builder.buildURem(lhs, rhs, "");
+ }
+
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1957,3 +1965,144 @@ pub const FuncGen = struct {
return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
}
};
+
+fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
+ switch (arch) {
+ .aarch64, .aarch64_be, .aarch64_32 => {
+ llvm.LLVMInitializeAArch64Target();
+ llvm.LLVMInitializeAArch64TargetInfo();
+ llvm.LLVMInitializeAArch64TargetMC();
+ llvm.LLVMInitializeAArch64AsmPrinter();
+ llvm.LLVMInitializeAArch64AsmParser();
+ },
+ .amdgcn => {
+ llvm.LLVMInitializeAMDGPUTarget();
+ llvm.LLVMInitializeAMDGPUTargetInfo();
+ llvm.LLVMInitializeAMDGPUTargetMC();
+ llvm.LLVMInitializeAMDGPUAsmPrinter();
+ llvm.LLVMInitializeAMDGPUAsmParser();
+ },
+ .arm, .armeb => {
+ llvm.LLVMInitializeARMTarget();
+ llvm.LLVMInitializeARMTargetInfo();
+ llvm.LLVMInitializeARMTargetMC();
+ llvm.LLVMInitializeARMAsmPrinter();
+ llvm.LLVMInitializeARMAsmParser();
+ },
+ .avr => {
+ llvm.LLVMInitializeAVRTarget();
+ llvm.LLVMInitializeAVRTargetInfo();
+ llvm.LLVMInitializeAVRTargetMC();
+ llvm.LLVMInitializeAVRAsmPrinter();
+ llvm.LLVMInitializeAVRAsmParser();
+ },
+ .bpfel, .bpfeb => {
+ llvm.LLVMInitializeBPFTarget();
+ llvm.LLVMInitializeBPFTargetInfo();
+ llvm.LLVMInitializeBPFTargetMC();
+ llvm.LLVMInitializeBPFAsmPrinter();
+ llvm.LLVMInitializeBPFAsmParser();
+ },
+ .hexagon => {
+ llvm.LLVMInitializeHexagonTarget();
+ llvm.LLVMInitializeHexagonTargetInfo();
+ llvm.LLVMInitializeHexagonTargetMC();
+ llvm.LLVMInitializeHexagonAsmPrinter();
+ llvm.LLVMInitializeHexagonAsmParser();
+ },
+ .lanai => {
+ llvm.LLVMInitializeLanaiTarget();
+ llvm.LLVMInitializeLanaiTargetInfo();
+ llvm.LLVMInitializeLanaiTargetMC();
+ llvm.LLVMInitializeLanaiAsmPrinter();
+ llvm.LLVMInitializeLanaiAsmParser();
+ },
+ .mips, .mipsel, .mips64, .mips64el => {
+ llvm.LLVMInitializeMipsTarget();
+ llvm.LLVMInitializeMipsTargetInfo();
+ llvm.LLVMInitializeMipsTargetMC();
+ llvm.LLVMInitializeMipsAsmPrinter();
+ llvm.LLVMInitializeMipsAsmParser();
+ },
+ .msp430 => {
+ llvm.LLVMInitializeMSP430Target();
+ llvm.LLVMInitializeMSP430TargetInfo();
+ llvm.LLVMInitializeMSP430TargetMC();
+ llvm.LLVMInitializeMSP430AsmPrinter();
+ llvm.LLVMInitializeMSP430AsmParser();
+ },
+ .nvptx, .nvptx64 => {
+ llvm.LLVMInitializeNVPTXTarget();
+ llvm.LLVMInitializeNVPTXTargetInfo();
+ llvm.LLVMInitializeNVPTXTargetMC();
+ llvm.LLVMInitializeNVPTXAsmPrinter();
+ // There is no LLVMInitializeNVPTXAsmParser function available.
+ },
+ .powerpc, .powerpcle, .powerpc64, .powerpc64le => {
+ llvm.LLVMInitializePowerPCTarget();
+ llvm.LLVMInitializePowerPCTargetInfo();
+ llvm.LLVMInitializePowerPCTargetMC();
+ llvm.LLVMInitializePowerPCAsmPrinter();
+ llvm.LLVMInitializePowerPCAsmParser();
+ },
+ .riscv32, .riscv64 => {
+ llvm.LLVMInitializeRISCVTarget();
+ llvm.LLVMInitializeRISCVTargetInfo();
+ llvm.LLVMInitializeRISCVTargetMC();
+ llvm.LLVMInitializeRISCVAsmPrinter();
+ llvm.LLVMInitializeRISCVAsmParser();
+ },
+ .sparc, .sparcv9, .sparcel => {
+ llvm.LLVMInitializeSparcTarget();
+ llvm.LLVMInitializeSparcTargetInfo();
+ llvm.LLVMInitializeSparcTargetMC();
+ llvm.LLVMInitializeSparcAsmPrinter();
+ llvm.LLVMInitializeSparcAsmParser();
+ },
+ .s390x => {
+ llvm.LLVMInitializeSystemZTarget();
+ llvm.LLVMInitializeSystemZTargetInfo();
+ llvm.LLVMInitializeSystemZTargetMC();
+ llvm.LLVMInitializeSystemZAsmPrinter();
+ llvm.LLVMInitializeSystemZAsmParser();
+ },
+ .wasm32, .wasm64 => {
+ llvm.LLVMInitializeWebAssemblyTarget();
+ llvm.LLVMInitializeWebAssemblyTargetInfo();
+ llvm.LLVMInitializeWebAssemblyTargetMC();
+ llvm.LLVMInitializeWebAssemblyAsmPrinter();
+ llvm.LLVMInitializeWebAssemblyAsmParser();
+ },
+ .i386, .x86_64 => {
+ llvm.LLVMInitializeX86Target();
+ llvm.LLVMInitializeX86TargetInfo();
+ llvm.LLVMInitializeX86TargetMC();
+ llvm.LLVMInitializeX86AsmPrinter();
+ llvm.LLVMInitializeX86AsmParser();
+ },
+ .xcore => {
+ llvm.LLVMInitializeXCoreTarget();
+ llvm.LLVMInitializeXCoreTargetInfo();
+ llvm.LLVMInitializeXCoreTargetMC();
+ llvm.LLVMInitializeXCoreAsmPrinter();
+ // There is no LLVMInitializeXCoreAsmParser function available.
+ },
+ .arc => {},
+ .csky => {},
+ .r600 => {},
+ .tce, .tcele => {},
+ .thumb, .thumbeb => {},
+ .le32, .le64 => {},
+ .amdil, .amdil64 => {},
+ .hsail, .hsail64 => {},
+ .spir, .spir64 => {},
+ .kalimba => {},
+ .shave => {},
+ .renderscript32 => {},
+ .renderscript64 => {},
+ .ve => {},
+ .spu_2 => {},
+ .spirv32 => {},
+ .spirv64 => {},
+ }
+}
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index d33ca29d4f..a10002b5d6 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -386,6 +386,15 @@ pub const Builder = opaque {
pub const buildFDiv = LLVMBuildFDiv;
extern fn LLVMBuildFDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildURem = LLVMBuildURem;
+ extern fn LLVMBuildURem(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildSRem = LLVMBuildSRem;
+ extern fn LLVMBuildSRem(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildFRem = LLVMBuildFRem;
+ extern fn LLVMBuildFRem(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildAnd = LLVMBuildAnd;
extern fn LLVMBuildAnd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@@ -596,188 +605,93 @@ pub const Target = opaque {
extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **const Target, ErrorMessage: *[*:0]const u8) Bool;
};
-extern fn LLVMInitializeAArch64TargetInfo() void;
-extern fn LLVMInitializeAMDGPUTargetInfo() void;
-extern fn LLVMInitializeARMTargetInfo() void;
-extern fn LLVMInitializeAVRTargetInfo() void;
-extern fn LLVMInitializeBPFTargetInfo() void;
-extern fn LLVMInitializeHexagonTargetInfo() void;
-extern fn LLVMInitializeLanaiTargetInfo() void;
-extern fn LLVMInitializeMipsTargetInfo() void;
-extern fn LLVMInitializeMSP430TargetInfo() void;
-extern fn LLVMInitializeNVPTXTargetInfo() void;
-extern fn LLVMInitializePowerPCTargetInfo() void;
-extern fn LLVMInitializeRISCVTargetInfo() void;
-extern fn LLVMInitializeSparcTargetInfo() void;
-extern fn LLVMInitializeSystemZTargetInfo() void;
-extern fn LLVMInitializeWebAssemblyTargetInfo() void;
-extern fn LLVMInitializeX86TargetInfo() void;
-extern fn LLVMInitializeXCoreTargetInfo() void;
-extern fn LLVMInitializeAArch64Target() void;
-extern fn LLVMInitializeAMDGPUTarget() void;
-extern fn LLVMInitializeARMTarget() void;
-extern fn LLVMInitializeAVRTarget() void;
-extern fn LLVMInitializeBPFTarget() void;
-extern fn LLVMInitializeHexagonTarget() void;
-extern fn LLVMInitializeLanaiTarget() void;
-extern fn LLVMInitializeMipsTarget() void;
-extern fn LLVMInitializeMSP430Target() void;
-extern fn LLVMInitializeNVPTXTarget() void;
-extern fn LLVMInitializePowerPCTarget() void;
-extern fn LLVMInitializeRISCVTarget() void;
-extern fn LLVMInitializeSparcTarget() void;
-extern fn LLVMInitializeSystemZTarget() void;
-extern fn LLVMInitializeWebAssemblyTarget() void;
-extern fn LLVMInitializeX86Target() void;
-extern fn LLVMInitializeXCoreTarget() void;
-extern fn LLVMInitializeAArch64TargetMC() void;
-extern fn LLVMInitializeAMDGPUTargetMC() void;
-extern fn LLVMInitializeARMTargetMC() void;
-extern fn LLVMInitializeAVRTargetMC() void;
-extern fn LLVMInitializeBPFTargetMC() void;
-extern fn LLVMInitializeHexagonTargetMC() void;
-extern fn LLVMInitializeLanaiTargetMC() void;
-extern fn LLVMInitializeMipsTargetMC() void;
-extern fn LLVMInitializeMSP430TargetMC() void;
-extern fn LLVMInitializeNVPTXTargetMC() void;
-extern fn LLVMInitializePowerPCTargetMC() void;
-extern fn LLVMInitializeRISCVTargetMC() void;
-extern fn LLVMInitializeSparcTargetMC() void;
-extern fn LLVMInitializeSystemZTargetMC() void;
-extern fn LLVMInitializeWebAssemblyTargetMC() void;
-extern fn LLVMInitializeX86TargetMC() void;
-extern fn LLVMInitializeXCoreTargetMC() void;
-extern fn LLVMInitializeAArch64AsmPrinter() void;
-extern fn LLVMInitializeAMDGPUAsmPrinter() void;
-extern fn LLVMInitializeARMAsmPrinter() void;
-extern fn LLVMInitializeAVRAsmPrinter() void;
-extern fn LLVMInitializeBPFAsmPrinter() void;
-extern fn LLVMInitializeHexagonAsmPrinter() void;
-extern fn LLVMInitializeLanaiAsmPrinter() void;
-extern fn LLVMInitializeMipsAsmPrinter() void;
-extern fn LLVMInitializeMSP430AsmPrinter() void;
-extern fn LLVMInitializeNVPTXAsmPrinter() void;
-extern fn LLVMInitializePowerPCAsmPrinter() void;
-extern fn LLVMInitializeRISCVAsmPrinter() void;
-extern fn LLVMInitializeSparcAsmPrinter() void;
-extern fn LLVMInitializeSystemZAsmPrinter() void;
-extern fn LLVMInitializeWebAssemblyAsmPrinter() void;
-extern fn LLVMInitializeX86AsmPrinter() void;
-extern fn LLVMInitializeXCoreAsmPrinter() void;
-extern fn LLVMInitializeAArch64AsmParser() void;
-extern fn LLVMInitializeAMDGPUAsmParser() void;
-extern fn LLVMInitializeARMAsmParser() void;
-extern fn LLVMInitializeAVRAsmParser() void;
-extern fn LLVMInitializeBPFAsmParser() void;
-extern fn LLVMInitializeHexagonAsmParser() void;
-extern fn LLVMInitializeLanaiAsmParser() void;
-extern fn LLVMInitializeMipsAsmParser() void;
-extern fn LLVMInitializeMSP430AsmParser() void;
-extern fn LLVMInitializePowerPCAsmParser() void;
-extern fn LLVMInitializeRISCVAsmParser() void;
-extern fn LLVMInitializeSparcAsmParser() void;
-extern fn LLVMInitializeSystemZAsmParser() void;
-extern fn LLVMInitializeWebAssemblyAsmParser() void;
-extern fn LLVMInitializeX86AsmParser() void;
-
-pub const initializeAllTargetInfos = LLVMInitializeAllTargetInfos;
-fn LLVMInitializeAllTargetInfos() callconv(.C) void {
- LLVMInitializeAArch64TargetInfo();
- LLVMInitializeAMDGPUTargetInfo();
- LLVMInitializeARMTargetInfo();
- LLVMInitializeAVRTargetInfo();
- LLVMInitializeBPFTargetInfo();
- LLVMInitializeHexagonTargetInfo();
- LLVMInitializeLanaiTargetInfo();
- LLVMInitializeMipsTargetInfo();
- LLVMInitializeMSP430TargetInfo();
- LLVMInitializeNVPTXTargetInfo();
- LLVMInitializePowerPCTargetInfo();
- LLVMInitializeRISCVTargetInfo();
- LLVMInitializeSparcTargetInfo();
- LLVMInitializeSystemZTargetInfo();
- LLVMInitializeWebAssemblyTargetInfo();
- LLVMInitializeX86TargetInfo();
- LLVMInitializeXCoreTargetInfo();
-}
-pub const initializeAllTargets = LLVMInitializeAllTargets;
-fn LLVMInitializeAllTargets() callconv(.C) void {
- LLVMInitializeAArch64Target();
- LLVMInitializeAMDGPUTarget();
- LLVMInitializeARMTarget();
- LLVMInitializeAVRTarget();
- LLVMInitializeBPFTarget();
- LLVMInitializeHexagonTarget();
- LLVMInitializeLanaiTarget();
- LLVMInitializeMipsTarget();
- LLVMInitializeMSP430Target();
- LLVMInitializeNVPTXTarget();
- LLVMInitializePowerPCTarget();
- LLVMInitializeRISCVTarget();
- LLVMInitializeSparcTarget();
- LLVMInitializeSystemZTarget();
- LLVMInitializeWebAssemblyTarget();
- LLVMInitializeX86Target();
- LLVMInitializeXCoreTarget();
-}
-pub const initializeAllTargetMCs = LLVMInitializeAllTargetMCs;
-fn LLVMInitializeAllTargetMCs() callconv(.C) void {
- LLVMInitializeAArch64TargetMC();
- LLVMInitializeAMDGPUTargetMC();
- LLVMInitializeARMTargetMC();
- LLVMInitializeAVRTargetMC();
- LLVMInitializeBPFTargetMC();
- LLVMInitializeHexagonTargetMC();
- LLVMInitializeLanaiTargetMC();
- LLVMInitializeMipsTargetMC();
- LLVMInitializeMSP430TargetMC();
- LLVMInitializeNVPTXTargetMC();
- LLVMInitializePowerPCTargetMC();
- LLVMInitializeRISCVTargetMC();
- LLVMInitializeSparcTargetMC();
- LLVMInitializeSystemZTargetMC();
- LLVMInitializeWebAssemblyTargetMC();
- LLVMInitializeX86TargetMC();
- LLVMInitializeXCoreTargetMC();
-}
-pub const initializeAllAsmPrinters = LLVMInitializeAllAsmPrinters;
-fn LLVMInitializeAllAsmPrinters() callconv(.C) void {
- LLVMInitializeAArch64AsmPrinter();
- LLVMInitializeAMDGPUAsmPrinter();
- LLVMInitializeARMAsmPrinter();
- LLVMInitializeAVRAsmPrinter();
- LLVMInitializeBPFAsmPrinter();
- LLVMInitializeHexagonAsmPrinter();
- LLVMInitializeLanaiAsmPrinter();
- LLVMInitializeMipsAsmPrinter();
- LLVMInitializeMSP430AsmPrinter();
- LLVMInitializeNVPTXAsmPrinter();
- LLVMInitializePowerPCAsmPrinter();
- LLVMInitializeRISCVAsmPrinter();
- LLVMInitializeSparcAsmPrinter();
- LLVMInitializeSystemZAsmPrinter();
- LLVMInitializeWebAssemblyAsmPrinter();
- LLVMInitializeX86AsmPrinter();
- LLVMInitializeXCoreAsmPrinter();
-}
-pub const initializeAllAsmParsers = LLVMInitializeAllAsmParsers;
-fn LLVMInitializeAllAsmParsers() callconv(.C) void {
- LLVMInitializeAArch64AsmParser();
- LLVMInitializeAMDGPUAsmParser();
- LLVMInitializeARMAsmParser();
- LLVMInitializeAVRAsmParser();
- LLVMInitializeBPFAsmParser();
- LLVMInitializeHexagonAsmParser();
- LLVMInitializeLanaiAsmParser();
- LLVMInitializeMipsAsmParser();
- LLVMInitializeMSP430AsmParser();
- LLVMInitializePowerPCAsmParser();
- LLVMInitializeRISCVAsmParser();
- LLVMInitializeSparcAsmParser();
- LLVMInitializeSystemZAsmParser();
- LLVMInitializeWebAssemblyAsmParser();
- LLVMInitializeX86AsmParser();
-}
+pub extern fn LLVMInitializeAArch64TargetInfo() void;
+pub extern fn LLVMInitializeAMDGPUTargetInfo() void;
+pub extern fn LLVMInitializeARMTargetInfo() void;
+pub extern fn LLVMInitializeAVRTargetInfo() void;
+pub extern fn LLVMInitializeBPFTargetInfo() void;
+pub extern fn LLVMInitializeHexagonTargetInfo() void;
+pub extern fn LLVMInitializeLanaiTargetInfo() void;
+pub extern fn LLVMInitializeMipsTargetInfo() void;
+pub extern fn LLVMInitializeMSP430TargetInfo() void;
+pub extern fn LLVMInitializeNVPTXTargetInfo() void;
+pub extern fn LLVMInitializePowerPCTargetInfo() void;
+pub extern fn LLVMInitializeRISCVTargetInfo() void;
+pub extern fn LLVMInitializeSparcTargetInfo() void;
+pub extern fn LLVMInitializeSystemZTargetInfo() void;
+pub extern fn LLVMInitializeWebAssemblyTargetInfo() void;
+pub extern fn LLVMInitializeX86TargetInfo() void;
+pub extern fn LLVMInitializeXCoreTargetInfo() void;
+
+pub extern fn LLVMInitializeAArch64Target() void;
+pub extern fn LLVMInitializeAMDGPUTarget() void;
+pub extern fn LLVMInitializeARMTarget() void;
+pub extern fn LLVMInitializeAVRTarget() void;
+pub extern fn LLVMInitializeBPFTarget() void;
+pub extern fn LLVMInitializeHexagonTarget() void;
+pub extern fn LLVMInitializeLanaiTarget() void;
+pub extern fn LLVMInitializeMipsTarget() void;
+pub extern fn LLVMInitializeMSP430Target() void;
+pub extern fn LLVMInitializeNVPTXTarget() void;
+pub extern fn LLVMInitializePowerPCTarget() void;
+pub extern fn LLVMInitializeRISCVTarget() void;
+pub extern fn LLVMInitializeSparcTarget() void;
+pub extern fn LLVMInitializeSystemZTarget() void;
+pub extern fn LLVMInitializeWebAssemblyTarget() void;
+pub extern fn LLVMInitializeX86Target() void;
+pub extern fn LLVMInitializeXCoreTarget() void;
+
+pub extern fn LLVMInitializeAArch64TargetMC() void;
+pub extern fn LLVMInitializeAMDGPUTargetMC() void;
+pub extern fn LLVMInitializeARMTargetMC() void;
+pub extern fn LLVMInitializeAVRTargetMC() void;
+pub extern fn LLVMInitializeBPFTargetMC() void;
+pub extern fn LLVMInitializeHexagonTargetMC() void;
+pub extern fn LLVMInitializeLanaiTargetMC() void;
+pub extern fn LLVMInitializeMipsTargetMC() void;
+pub extern fn LLVMInitializeMSP430TargetMC() void;
+pub extern fn LLVMInitializeNVPTXTargetMC() void;
+pub extern fn LLVMInitializePowerPCTargetMC() void;
+pub extern fn LLVMInitializeRISCVTargetMC() void;
+pub extern fn LLVMInitializeSparcTargetMC() void;
+pub extern fn LLVMInitializeSystemZTargetMC() void;
+pub extern fn LLVMInitializeWebAssemblyTargetMC() void;
+pub extern fn LLVMInitializeX86TargetMC() void;
+pub extern fn LLVMInitializeXCoreTargetMC() void;
+
+pub extern fn LLVMInitializeAArch64AsmPrinter() void;
+pub extern fn LLVMInitializeAMDGPUAsmPrinter() void;
+pub extern fn LLVMInitializeARMAsmPrinter() void;
+pub extern fn LLVMInitializeAVRAsmPrinter() void;
+pub extern fn LLVMInitializeBPFAsmPrinter() void;
+pub extern fn LLVMInitializeHexagonAsmPrinter() void;
+pub extern fn LLVMInitializeLanaiAsmPrinter() void;
+pub extern fn LLVMInitializeMipsAsmPrinter() void;
+pub extern fn LLVMInitializeMSP430AsmPrinter() void;
+pub extern fn LLVMInitializeNVPTXAsmPrinter() void;
+pub extern fn LLVMInitializePowerPCAsmPrinter() void;
+pub extern fn LLVMInitializeRISCVAsmPrinter() void;
+pub extern fn LLVMInitializeSparcAsmPrinter() void;
+pub extern fn LLVMInitializeSystemZAsmPrinter() void;
+pub extern fn LLVMInitializeWebAssemblyAsmPrinter() void;
+pub extern fn LLVMInitializeX86AsmPrinter() void;
+pub extern fn LLVMInitializeXCoreAsmPrinter() void;
+
+pub extern fn LLVMInitializeAArch64AsmParser() void;
+pub extern fn LLVMInitializeAMDGPUAsmParser() void;
+pub extern fn LLVMInitializeARMAsmParser() void;
+pub extern fn LLVMInitializeAVRAsmParser() void;
+pub extern fn LLVMInitializeBPFAsmParser() void;
+pub extern fn LLVMInitializeHexagonAsmParser() void;
+pub extern fn LLVMInitializeLanaiAsmParser() void;
+pub extern fn LLVMInitializeMipsAsmParser() void;
+pub extern fn LLVMInitializeMSP430AsmParser() void;
+pub extern fn LLVMInitializePowerPCAsmParser() void;
+pub extern fn LLVMInitializeRISCVAsmParser() void;
+pub extern fn LLVMInitializeSparcAsmParser() void;
+pub extern fn LLVMInitializeSystemZAsmParser() void;
+pub extern fn LLVMInitializeWebAssemblyAsmParser() void;
+pub extern fn LLVMInitializeX86AsmParser() void;
extern fn ZigLLDLinkCOFF(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
extern fn ZigLLDLinkELF(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
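Note on the bindings.zig hunk above: the aggregate LLVMInitializeAll* wrappers are removed in favour of per-target extern declarations, so a caller can initialize exactly the target families it needs; the caller-side change lives elsewhere in this commit. A minimal sketch of what such a call site could look like inside an init function, assuming only the X86 backend is wanted:

    // Hypothetical call site using the externs declared above; other targets
    // would follow the same TargetInfo/Target/TargetMC/AsmPrinter/AsmParser pattern.
    LLVMInitializeX86TargetInfo();
    LLVMInitializeX86Target();
    LLVMInitializeX86TargetMC();
    LLVMInitializeX86AsmPrinter();
    LLVMInitializeX86AsmParser();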
diff --git a/src/codegen/riscv64.zig b/src/codegen/riscv64.zig
index 831f74b1b7..b297737816 100644
--- a/src/codegen/riscv64.zig
+++ b/src/codegen/riscv64.zig
@@ -390,7 +390,7 @@ pub const RawRegister = enum(u5) {
x24, x25, x26, x27, x28, x29, x30, x31,
pub fn dwarfLocOp(reg: RawRegister) u8 {
- return @enumToInt(reg) + DW.OP_reg0;
+ return @enumToInt(reg) + DW.OP.reg0;
}
};
@@ -424,7 +424,7 @@ pub const Register = enum(u5) {
}
pub fn dwarfLocOp(reg: Register) u8 {
- return @as(u8, @enumToInt(reg)) + DW.OP_reg0;
+ return @as(u8, @enumToInt(reg)) + DW.OP.reg0;
}
};
diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig
index 429ed63d23..26d1925646 100644
--- a/src/codegen/spirv/spec.zig
+++ b/src/codegen/spirv/spec.zig
@@ -582,8 +582,8 @@ pub const Opcode = enum(u16) {
OpSpecConstantCompositeContinuedINTEL = 6092,
_,
- const OpReportIntersectionKHR = OpReportIntersectionNV;
- const OpTypeAccelerationStructureKHR = OpTypeAccelerationStructureNV;
+ const OpReportIntersectionKHR: Opcode = .OpReportIntersectionNV;
+ const OpTypeAccelerationStructureKHR: Opcode = .OpTypeAccelerationStructureNV;
};
pub const ImageOperands = packed struct {
Bias: bool align(@alignOf(u32)) = false,
diff --git a/src/codegen/x86.zig b/src/codegen/x86.zig
index fdad4e56db..5b981b9ef4 100644
--- a/src/codegen/x86.zig
+++ b/src/codegen/x86.zig
@@ -59,14 +59,14 @@ pub const Register = enum(u8) {
pub fn dwarfLocOp(reg: Register) u8 {
return switch (reg.to32()) {
- .eax => DW.OP_reg0,
- .ecx => DW.OP_reg1,
- .edx => DW.OP_reg2,
- .ebx => DW.OP_reg3,
- .esp => DW.OP_reg4,
- .ebp => DW.OP_reg5,
- .esi => DW.OP_reg6,
- .edi => DW.OP_reg7,
+ .eax => DW.OP.reg0,
+ .ecx => DW.OP.reg1,
+ .edx => DW.OP.reg2,
+ .ebx => DW.OP.reg3,
+ .esp => DW.OP.reg4,
+ .ebp => DW.OP.reg5,
+ .esi => DW.OP.reg6,
+ .edi => DW.OP.reg7,
else => unreachable,
};
}
diff --git a/src/codegen/x86_64.zig b/src/codegen/x86_64.zig
index 2964d7245e..72a7468041 100644
--- a/src/codegen/x86_64.zig
+++ b/src/codegen/x86_64.zig
@@ -115,23 +115,23 @@ pub const Register = enum(u8) {
pub fn dwarfLocOp(self: Register) u8 {
return switch (self.to64()) {
- .rax => DW.OP_reg0,
- .rdx => DW.OP_reg1,
- .rcx => DW.OP_reg2,
- .rbx => DW.OP_reg3,
- .rsi => DW.OP_reg4,
- .rdi => DW.OP_reg5,
- .rbp => DW.OP_reg6,
- .rsp => DW.OP_reg7,
-
- .r8 => DW.OP_reg8,
- .r9 => DW.OP_reg9,
- .r10 => DW.OP_reg10,
- .r11 => DW.OP_reg11,
- .r12 => DW.OP_reg12,
- .r13 => DW.OP_reg13,
- .r14 => DW.OP_reg14,
- .r15 => DW.OP_reg15,
+ .rax => DW.OP.reg0,
+ .rdx => DW.OP.reg1,
+ .rcx => DW.OP.reg2,
+ .rbx => DW.OP.reg3,
+ .rsi => DW.OP.reg4,
+ .rdi => DW.OP.reg5,
+ .rbp => DW.OP.reg6,
+ .rsp => DW.OP.reg7,
+
+ .r8 => DW.OP.reg8,
+ .r9 => DW.OP.reg9,
+ .r10 => DW.OP.reg10,
+ .r11 => DW.OP.reg11,
+ .r12 => DW.OP.reg12,
+ .r13 => DW.OP.reg13,
+ .r14 => DW.OP.reg14,
+ .r15 => DW.OP.reg15,
else => unreachable,
};
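The riscv64, x86, and x86_64 register files above only track the std.dwarf renaming from underscore constants (DW.OP_reg0) to namespaced ones (DW.OP.reg0); the register-to-opcode mapping itself is unchanged. A tiny usage sketch, assuming the x86_64 Register enum shown above and any byte buffer for the location expression:

    // Hypothetical: rax maps to DW.OP.reg0, i.e. a DW_OP_reg0 location opcode.
    const loc_op: u8 = Register.rax.dwarfLocOp();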
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index b639e0f2f8..62174930f8 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -12,7 +12,7 @@ const is_haiku = Target.current.os.tag == .haiku;
const log = std.log.scoped(.libc_installation);
-usingnamespace @import("windows_sdk.zig");
+const ZigWindowsSDK = @import("windows_sdk.zig").ZigWindowsSDK;
/// See the render function implementation for documentation of the fields.
pub const LibCInstallation = struct {
@@ -41,6 +41,7 @@ pub const LibCInstallation = struct {
pub fn parse(
allocator: *Allocator,
libc_file: []const u8,
+ target: std.zig.CrossTarget,
) !LibCInstallation {
var self: LibCInstallation = .{};
@@ -96,26 +97,31 @@ pub const LibCInstallation = struct {
log.err("sys_include_dir may not be empty\n", .{});
return error.ParseError;
}
- if (self.crt_dir == null and !is_darwin) {
- log.err("crt_dir may not be empty for {s}\n", .{@tagName(Target.current.os.tag)});
+
+ const os_tag = target.getOsTag();
+ if (self.crt_dir == null and !target.isDarwin()) {
+ log.err("crt_dir may not be empty for {s}\n", .{@tagName(os_tag)});
return error.ParseError;
}
- if (self.msvc_lib_dir == null and is_windows) {
+
+ const abi = target.getAbi();
+ if (self.msvc_lib_dir == null and target.isWindows() and abi == .msvc) {
log.err("msvc_lib_dir may not be empty for {s}-{s}\n", .{
- @tagName(Target.current.os.tag),
- @tagName(Target.current.abi),
+ @tagName(os_tag),
+ @tagName(abi),
});
return error.ParseError;
}
- if (self.kernel32_lib_dir == null and is_windows) {
+ if (self.kernel32_lib_dir == null and target.isWindows() and abi == .msvc) {
log.err("kernel32_lib_dir may not be empty for {s}-{s}\n", .{
- @tagName(Target.current.os.tag),
- @tagName(Target.current.abi),
+ @tagName(os_tag),
+ @tagName(abi),
});
return error.ParseError;
}
- if (self.gcc_dir == null and is_haiku) {
- log.err("gcc_dir may not be empty for {s}\n", .{@tagName(Target.current.os.tag)});
+
+ if (self.gcc_dir == null and os_tag == .haiku) {
+ log.err("gcc_dir may not be empty for {s}\n", .{@tagName(os_tag)});
return error.ParseError;
}
@@ -183,9 +189,9 @@ pub const LibCInstallation = struct {
if (!build_options.have_llvm)
return error.WindowsSdkNotFound;
var sdk: *ZigWindowsSDK = undefined;
- switch (zig_find_windows_sdk(&sdk)) {
+ switch (ZigWindowsSDK.find(&sdk)) {
.None => {
- defer zig_free_windows_sdk(sdk);
+ defer sdk.free();
var batch = Batch(FindError!void, 5, .auto_async).init();
batch.add(&async self.findNativeMsvcIncludeDir(args, sdk));
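With this change LibCInstallation.parse validates the mandatory directories (crt_dir, msvc_lib_dir, kernel32_lib_dir, gcc_dir) against the target being compiled for rather than the host, and the Windows SDK lookup goes through the ZigWindowsSDK.find/free methods instead of free functions. A caller-side sketch, with the paths file name and allocator name purely illustrative:

    // Hypothetical caller: validate a libc paths file for an explicit cross target.
    const cross_target = try std.zig.CrossTarget.parse(.{ .arch_os_abi = "x86_64-windows-msvc" });
    var libc = try LibCInstallation.parse(gpa, "my_libc.txt", cross_target);
    defer libc.deinit(gpa);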
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 0d70854213..192f9ac2d2 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -71,6 +71,8 @@ pub fn buildStaticLib(comp: *Compilation) !void {
try cflags.append("-Wa,--noexecstack");
try cflags.append("-fvisibility=hidden");
try cflags.append("-fvisibility-inlines-hidden");
+ // necessary so that libunwind can unwind through its own stack frames
+ try cflags.append("-funwind-tables");
// This is intentionally always defined because the macro definition means, should it only
// build for the target specified by compiler defines. Since we pass -target the compiler
diff --git a/src/link.zig b/src/link.zig
index 1293fab4d2..88159496f4 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -73,6 +73,10 @@ pub const Options = struct {
rdynamic: bool,
z_nodelete: bool,
z_defs: bool,
+ z_origin: bool,
+ z_noexecstack: bool,
+ z_now: bool,
+ z_relro: bool,
tsaware: bool,
nxcompat: bool,
dynamicbase: bool,
@@ -179,7 +183,7 @@ pub const File = struct {
/// This is where the .debug_info tag for the type is.
off: u32,
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
- /// List of DW.AT_type / DW.FORM_ref4 that points to the type.
+ /// List of DW.AT.type / DW.FORM.ref4 that points to the type.
relocs: std.ArrayListUnmanaged(u32),
};
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 9ddebd3453..f8cf70104f 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -772,48 +772,48 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// These are LEB encoded but since the values are all less than 127
// we can simply append these bytes.
const abbrev_buf = [_]u8{
- abbrev_compile_unit, DW.TAG_compile_unit, DW.CHILDREN_yes, // header
- DW.AT_stmt_list, DW.FORM_sec_offset, DW.AT_low_pc,
- DW.FORM_addr, DW.AT_high_pc, DW.FORM_addr,
- DW.AT_name, DW.FORM_strp, DW.AT_comp_dir,
- DW.FORM_strp, DW.AT_producer, DW.FORM_strp,
- DW.AT_language, DW.FORM_data2, 0,
+ abbrev_compile_unit, DW.TAG.compile_unit, DW.CHILDREN.yes, // header
+ DW.AT.stmt_list, DW.FORM.sec_offset, DW.AT.low_pc,
+ DW.FORM.addr, DW.AT.high_pc, DW.FORM.addr,
+ DW.AT.name, DW.FORM.strp, DW.AT.comp_dir,
+ DW.FORM.strp, DW.AT.producer, DW.FORM.strp,
+ DW.AT.language, DW.FORM.data2, 0,
0, // table sentinel
abbrev_subprogram,
- DW.TAG_subprogram,
- DW.CHILDREN_yes, // header
- DW.AT_low_pc,
- DW.FORM_addr,
- DW.AT_high_pc,
- DW.FORM_data4,
- DW.AT_type,
- DW.FORM_ref4,
- DW.AT_name,
- DW.FORM_string,
+ DW.TAG.subprogram,
+ DW.CHILDREN.yes, // header
+ DW.AT.low_pc,
+ DW.FORM.addr,
+ DW.AT.high_pc,
+ DW.FORM.data4,
+ DW.AT.type,
+ DW.FORM.ref4,
+ DW.AT.name,
+ DW.FORM.string,
0, 0, // table sentinel
abbrev_subprogram_retvoid,
- DW.TAG_subprogram, DW.CHILDREN_yes, // header
- DW.AT_low_pc, DW.FORM_addr,
- DW.AT_high_pc, DW.FORM_data4,
- DW.AT_name, DW.FORM_string,
+ DW.TAG.subprogram, DW.CHILDREN.yes, // header
+ DW.AT.low_pc, DW.FORM.addr,
+ DW.AT.high_pc, DW.FORM.data4,
+ DW.AT.name, DW.FORM.string,
0,
0, // table sentinel
abbrev_base_type,
- DW.TAG_base_type,
- DW.CHILDREN_no, // header
- DW.AT_encoding,
- DW.FORM_data1,
- DW.AT_byte_size,
- DW.FORM_data1,
- DW.AT_name,
- DW.FORM_string, 0, 0, // table sentinel
- abbrev_pad1, DW.TAG_unspecified_type, DW.CHILDREN_no, // header
+ DW.TAG.base_type,
+ DW.CHILDREN.no, // header
+ DW.AT.encoding,
+ DW.FORM.data1,
+ DW.AT.byte_size,
+ DW.FORM.data1,
+ DW.AT.name,
+ DW.FORM.string, 0, 0, // table sentinel
+ abbrev_pad1, DW.TAG.unspecified_type, DW.CHILDREN.no, // header
0, 0, // table sentinel
abbrev_parameter,
- DW.TAG_formal_parameter, DW.CHILDREN_no, // header
- DW.AT_location, DW.FORM_exprloc,
- DW.AT_type, DW.FORM_ref4,
- DW.AT_name, DW.FORM_string,
+ DW.TAG.formal_parameter, DW.CHILDREN.no, // header
+ DW.AT.location, DW.FORM.exprloc,
+ DW.AT.type, DW.FORM.ref4,
+ DW.AT.name, DW.FORM.string,
0,
0, // table sentinel
0,
@@ -897,7 +897,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
di_buf.appendAssumeCapacity(abbrev_compile_unit);
- self.writeDwarfAddrAssumeCapacity(&di_buf, 0); // DW.AT_stmt_list, DW.FORM_sec_offset
+ self.writeDwarfAddrAssumeCapacity(&di_buf, 0); // DW.AT.stmt_list, DW.FORM.sec_offset
self.writeDwarfAddrAssumeCapacity(&di_buf, low_pc);
self.writeDwarfAddrAssumeCapacity(&di_buf, high_pc);
self.writeDwarfAddrAssumeCapacity(&di_buf, name_strp);
@@ -906,7 +906,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// We are still waiting on dwarf-std.org to assign DW_LANG_Zig a number:
// http://dwarfstd.org/ShowIssue.php?issue=171115.1
// Until then we say it is C99.
- mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG_C99, target_endian);
+ mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG.C99, target_endian);
if (di_buf.items.len > first_dbg_info_decl.dbg_info_off) {
// Move the first N decls to the end to make more padding for the header.
@@ -1030,7 +1030,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
di_buf.items.len += ptr_width_bytes; // We will come back and write this.
const after_header_len = di_buf.items.len;
- const opcode_base = DW.LNS_set_isa + 1;
+ const opcode_base = DW.LNS.set_isa + 1;
di_buf.appendSliceAssumeCapacity(&[_]u8{
1, // minimum_instruction_length
1, // maximum_operations_per_instruction
@@ -1041,18 +1041,18 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// Standard opcode lengths. The number of items here is based on `opcode_base`.
// The value is the number of LEB128 operands the instruction takes.
- 0, // `DW.LNS_copy`
- 1, // `DW.LNS_advance_pc`
- 1, // `DW.LNS_advance_line`
- 1, // `DW.LNS_set_file`
- 1, // `DW.LNS_set_column`
- 0, // `DW.LNS_negate_stmt`
- 0, // `DW.LNS_set_basic_block`
- 0, // `DW.LNS_const_add_pc`
- 1, // `DW.LNS_fixed_advance_pc`
- 0, // `DW.LNS_set_prologue_end`
- 0, // `DW.LNS_set_epilogue_begin`
- 1, // `DW.LNS_set_isa`
+ 0, // `DW.LNS.copy`
+ 1, // `DW.LNS.advance_pc`
+ 1, // `DW.LNS.advance_line`
+ 1, // `DW.LNS.set_file`
+ 1, // `DW.LNS.set_column`
+ 0, // `DW.LNS.negate_stmt`
+ 0, // `DW.LNS.set_basic_block`
+ 0, // `DW.LNS.const_add_pc`
+ 1, // `DW.LNS.fixed_advance_pc`
+ 0, // `DW.LNS.set_prologue_end`
+ 0, // `DW.LNS.set_epilogue_begin`
+ 1, // `DW.LNS.set_isa`
0, // include_directories (none except the compilation unit cwd)
});
// file_names[0]
@@ -1345,6 +1345,10 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
man.hash.add(self.base.options.skip_linker_dependencies);
man.hash.add(self.base.options.z_nodelete);
man.hash.add(self.base.options.z_defs);
+ man.hash.add(self.base.options.z_origin);
+ man.hash.add(self.base.options.z_noexecstack);
+ man.hash.add(self.base.options.z_now);
+ man.hash.add(self.base.options.z_relro);
if (self.base.options.link_libc) {
man.hash.add(self.base.options.libc_installation != null);
if (self.base.options.libc_installation) |libc_installation| {
@@ -1482,6 +1486,22 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
try argv.append("-z");
try argv.append("defs");
}
+ if (self.base.options.z_origin) {
+ try argv.append("-z");
+ try argv.append("origin");
+ }
+ if (self.base.options.z_noexecstack) {
+ try argv.append("-z");
+ try argv.append("noexecstack");
+ }
+ if (self.base.options.z_now) {
+ try argv.append("-z");
+ try argv.append("now");
+ }
+ if (self.base.options.z_relro) {
+ try argv.append("-z");
+ try argv.append("relro");
+ }
if (getLDMOption(target)) |ldm| {
// Any target ELF will use the freebsd osabi if suffixed with "_fbsd".
@@ -2053,7 +2073,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
- // so the DW_TAG_compile_unit tag of the .debug_info section becomes dirty.
+ // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
self.debug_info_header_dirty = true;
// This becomes dirty for the same reason. We could potentially make this more
// fine-grained with the addition of support for more compilation units. It is planned to
@@ -2303,22 +2323,22 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const ptr_width_bytes = self.ptrWidthBytes();
dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
- DW.LNS_extended_op,
+ DW.LNS.extended_op,
ptr_width_bytes + 1,
- DW.LNE_set_address,
+ DW.LNE.set_address,
});
// This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len);
dbg_line_buffer.items.len += ptr_width_bytes;
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line);
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS.advance_line);
// This is the "relocatable" relative line offset from the previous function's end curly
// to this function's begin curly.
assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len);
// Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later.
leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off);
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file);
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_file);
assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
// Once we support more than one source file, this will have the ability to be more
// than one possible value.
@@ -2327,7 +2347,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
// Emit a line for the begin curly with prologue_end=false. The codegen will
// do the work of setting prologue_end=true and epilogue_begin=true.
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy);
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy);
// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
@@ -2344,9 +2364,9 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
// "relocations" and have to be in this fixed place so that functions can be
// moved in virtual address space.
assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len);
- dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr
+ dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT.low_pc, DW.FORM.addr
assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
- dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4
+ dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
if (fn_ret_has_bits) {
const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type);
if (!gop.found_existing) {
@@ -2356,9 +2376,9 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
};
}
try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len));
- dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
+ dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
- dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
+ dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT.name, DW.FORM.string
const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
.dwarf = .{
@@ -2409,7 +2429,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian);
}
- try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence });
+ try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence });
// Now we have the full contents and may allocate a region to store it.
@@ -2493,7 +2513,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
- // .debug_info - End the TAG_subprogram children.
+ // .debug_info - End the TAG.subprogram children.
try dbg_info_buffer.append(0);
return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer);
@@ -2566,34 +2586,34 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
.Bool => {
try dbg_info_buffer.appendSlice(&[_]u8{
abbrev_base_type,
- DW.ATE_boolean, // DW.AT_encoding , DW.FORM_data1
- 1, // DW.AT_byte_size, DW.FORM_data1
- 'b', 'o', 'o', 'l', 0, // DW.AT_name, DW.FORM_string
+ DW.ATE.boolean, // DW.AT.encoding , DW.FORM.data1
+ 1, // DW.AT.byte_size, DW.FORM.data1
+ 'b', 'o', 'o', 'l', 0, // DW.AT.name, DW.FORM.string
});
},
.Int => {
const info = ty.intInfo(self.base.options.target);
try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
- // DW.AT_encoding, DW.FORM_data1
+ // DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
- .signed => DW.ATE_signed,
- .unsigned => DW.ATE_unsigned,
+ .signed => DW.ATE.signed,
+ .unsigned => DW.ATE.unsigned,
});
- // DW.AT_byte_size, DW.FORM_data1
+ // DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(self.base.options.target)));
- // DW.AT_name, DW.FORM_string
+ // DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
},
.Optional => {
if (ty.isPtrLikeOptional()) {
try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
- // DW.AT_encoding, DW.FORM_data1
- dbg_info_buffer.appendAssumeCapacity(DW.ATE_address);
- // DW.AT_byte_size, DW.FORM_data1
+ // DW.AT.encoding, DW.FORM.data1
+ dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
+ // DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(self.base.options.target)));
- // DW.AT_name, DW.FORM_string
+ // DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
} else {
log.err("TODO implement .debug_info for type '{}'", .{ty});
@@ -3034,7 +3054,7 @@ fn archPtrWidthBytes(self: Elf) u8 {
/// The reloc offset for the virtual address of a function in its Line Number Program.
/// Size is a virtual address integer.
const dbg_line_vaddr_reloc_index = 3;
-/// The reloc offset for the virtual address of a function in its .debug_info TAG_subprogram.
+/// The reloc offset for the virtual address of a function in its .debug_info TAG.subprogram.
/// Size is a virtual address integer.
const dbg_info_low_pc_reloc_index = 1;
@@ -3060,7 +3080,7 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
const root_src_dir_path_len = if (self.base.options.module.?.root_pkg.root_src_directory.path) |p| p.len else 1; // "."
return @intCast(u32, 53 + directory_entry_format_count * 2 + file_name_entry_format_count * 2 +
directory_count * 8 + file_name_count * 8 +
- // These are encoded as DW.FORM_string rather than DW.FORM_strp as we would like
+ // These are encoded as DW.FORM.string rather than DW.FORM.strp as we would like
// because of a workaround for readelf and gdb failing to understand DWARFv5 correctly.
root_src_dir_path_len +
self.base.options.module.?.root_pkg.root_src_path.len);
@@ -3088,8 +3108,8 @@ fn pwriteDbgLineNops(
const tracy = trace(@src());
defer tracy.end();
- const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096;
- const three_byte_nop = [3]u8{ DW.LNS_advance_pc, 0b1000_0000, 0 };
+ const page_of_nops = [1]u8{DW.LNS.negate_stmt} ** 4096;
+ const three_byte_nop = [3]u8{ DW.LNS.advance_pc, 0b1000_0000, 0 };
var vecs: [512]std.os.iovec_const = undefined;
var vec_index: usize = 0;
{
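One detail worth spelling out from the updateFunc hunk above: the relative line offset is emitted with leb128.writeUnsignedFixed(4, ...) so the field always occupies four bytes and can be patched in place when the preceding function's end curly moves. A minimal sketch of that encoding idea (illustrative only, not the std.leb implementation):

    // Encode `value` as exactly four ULEB128 bytes by forcing the continuation bit
    // on the first three, so a later pass can overwrite the slot without resizing.
    fn writeUnsignedFixed4(buf: *[4]u8, value: u28) void {
        buf[0] = @truncate(u8, value & 0x7f) | 0x80;
        buf[1] = @truncate(u8, (value >> 7) & 0x7f) | 0x80;
        buf[2] = @truncate(u8, (value >> 14) & 0x7f) | 0x80;
        buf[3] = @truncate(u8, value >> 21);
    }

Any standard ULEB128 reader decodes those four bytes back to the same value, which is what lets the linker rewrite the slot later without shifting the surrounding bytes.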
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index be5bc230de..ce00c85dea 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -38,6 +38,8 @@ const LlvmObject = @import("../codegen/llvm.zig").Object;
const LoadCommand = commands.LoadCommand;
const Module = @import("../Module.zig");
const SegmentCommand = commands.SegmentCommand;
+const StringIndexAdapter = std.hash_map.StringIndexAdapter;
+const StringIndexContext = std.hash_map.StringIndexContext;
const Trie = @import("MachO/Trie.zig");
pub const TextBlock = Atom;
@@ -157,7 +159,7 @@ dyld_private_atom: ?*Atom = null,
stub_helper_preamble_atom: ?*Atom = null,
strtab: std.ArrayListUnmanaged(u8) = .{},
-strtab_dir: std.HashMapUnmanaged(u32, u32, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
+strtab_dir: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
got_entries_map: std.AutoArrayHashMapUnmanaged(GotIndirectionKey, *Atom) = .{},
stubs_map: std.AutoArrayHashMapUnmanaged(u32, *Atom) = .{},
@@ -221,33 +223,6 @@ const PendingUpdate = union(enum) {
add_got_entry: u32,
};
-const StringIndexContext = struct {
- strtab: *std.ArrayListUnmanaged(u8),
-
- pub fn eql(_: StringIndexContext, a: u32, b: u32) bool {
- return a == b;
- }
-
- pub fn hash(self: StringIndexContext, x: u32) u64 {
- const x_slice = mem.spanZ(@ptrCast([*:0]const u8, self.strtab.items.ptr) + x);
- return std.hash_map.hashString(x_slice);
- }
-};
-
-pub const StringSliceAdapter = struct {
- strtab: *std.ArrayListUnmanaged(u8),
-
- pub fn eql(self: StringSliceAdapter, a_slice: []const u8, b: u32) bool {
- const b_slice = mem.spanZ(@ptrCast([*:0]const u8, self.strtab.items.ptr) + b);
- return mem.eql(u8, a_slice, b_slice);
- }
-
- pub fn hash(self: StringSliceAdapter, adapted_key: []const u8) u64 {
- _ = self;
- return std.hash_map.hashString(adapted_key);
- }
-};
-
const SymbolWithLoc = struct {
// Table where the symbol can be found.
where: enum {
@@ -930,7 +905,19 @@ fn resolveSearchDir(
if (fs.path.isAbsolute(dir)) {
if (syslibroot) |root| {
- const full_path = try fs.path.join(arena, &[_][]const u8{ root, dir });
+ const common_dir = if (std.Target.current.os.tag == .windows) blk: {
+ // We need to check for disk designator and strip it out from dir path so
+ // that we can concat dir with syslibroot.
+ // TODO we should backport this mechanism to 'MachO.Dylib.parseDependentLibs()'
+ const disk_designator = fs.path.diskDesignatorWindows(dir);
+
+ if (mem.indexOf(u8, dir, disk_designator)) |where| {
+ break :blk dir[where + disk_designator.len ..];
+ }
+
+ break :blk dir;
+ } else dir;
+ const full_path = try fs.path.join(arena, &[_][]const u8{ root, common_dir });
try candidates.append(full_path);
}
}
@@ -2234,8 +2221,8 @@ fn createTentativeDefAtoms(self: *MachO) !void {
}
fn createDsoHandleAtom(self: *MachO) !void {
- if (self.strtab_dir.getAdapted(@as([]const u8, "___dso_handle"), StringSliceAdapter{
- .strtab = &self.strtab,
+ if (self.strtab_dir.getKeyAdapted(@as([]const u8, "___dso_handle"), StringIndexAdapter{
+ .bytes = &self.strtab,
})) |n_strx| blk: {
const resolv = self.symbol_resolver.getPtr(n_strx) orelse break :blk;
if (resolv.where != .undef) break :blk;
@@ -2822,8 +2809,8 @@ fn setEntryPoint(self: *MachO) !void {
// TODO we should respect the -entry flag passed in by the user to set a custom
// entrypoint. For now, assume default of `_main`.
const seg = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
- const n_strx = self.strtab_dir.getAdapted(@as([]const u8, "_main"), StringSliceAdapter{
- .strtab = &self.strtab,
+ const n_strx = self.strtab_dir.getKeyAdapted(@as([]const u8, "_main"), StringIndexAdapter{
+ .bytes = &self.strtab,
}) orelse {
log.err("'_main' export not found", .{});
return error.MissingMainEntrypoint;
@@ -4245,8 +4232,8 @@ pub fn addExternFn(self: *MachO, name: []const u8) !u32 {
const sym_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{name});
defer self.base.allocator.free(sym_name);
- if (self.strtab_dir.getAdapted(@as([]const u8, sym_name), StringSliceAdapter{
- .strtab = &self.strtab,
+ if (self.strtab_dir.getKeyAdapted(@as([]const u8, sym_name), StringIndexAdapter{
+ .bytes = &self.strtab,
})) |n_strx| {
const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
return resolv.where_index;
@@ -4937,7 +4924,13 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
}
pub fn makeString(self: *MachO, string: []const u8) !u32 {
- if (self.strtab_dir.getAdapted(@as([]const u8, string), StringSliceAdapter{ .strtab = &self.strtab })) |off| {
+ const gop = try self.strtab_dir.getOrPutContextAdapted(self.base.allocator, @as([]const u8, string), StringIndexAdapter{
+ .bytes = &self.strtab,
+ }, StringIndexContext{
+ .bytes = &self.strtab,
+ });
+ if (gop.found_existing) {
+ const off = gop.key_ptr.*;
log.debug("reusing string '{s}' at offset 0x{x}", .{ string, off });
return off;
}
@@ -4950,9 +4943,7 @@ pub fn makeString(self: *MachO, string: []const u8) !u32 {
self.strtab.appendSliceAssumeCapacity(string);
self.strtab.appendAssumeCapacity(0);
- try self.strtab_dir.putContext(self.base.allocator, new_off, new_off, StringIndexContext{
- .strtab = &self.strtab,
- });
+ gop.key_ptr.* = new_off;
return new_off;
}
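The private StringIndexContext/StringSliceAdapter pair is dropped in favour of the std.hash_map equivalents, and makeString now uses a single getOrPutContextAdapted so lookup and insertion share one hash of the incoming slice. A stripped-down sketch of the same offset-keyed interning pattern outside of MachO (the Interner name and shape are illustrative, not part of the linker):

    const std = @import("std");

    const Interner = struct {
        bytes: std.ArrayListUnmanaged(u8) = .{},
        dir: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .{},

        // Return the offset of `string` in `bytes`, appending it (NUL-terminated) if new.
        fn intern(self: *Interner, gpa: *std.mem.Allocator, string: []const u8) !u32 {
            const gop = try self.dir.getOrPutContextAdapted(
                gpa,
                string,
                std.hash_map.StringIndexAdapter{ .bytes = &self.bytes },
                std.hash_map.StringIndexContext{ .bytes = &self.bytes },
            );
            if (gop.found_existing) return gop.key_ptr.*;
            const off = @intCast(u32, self.bytes.items.len);
            try self.bytes.appendSlice(gpa, string);
            try self.bytes.append(gpa, 0);
            gop.key_ptr.* = off;
            return off;
        }
    };

Freeing `bytes` and `dir` remains the caller's responsibility, just as MachO.deinit owns strtab and strtab_dir.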
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 673ebf5cb0..298855934e 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -16,6 +16,7 @@ const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
+const StringIndexAdapter = std.hash_map.StringIndexAdapter;
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
@@ -692,8 +693,8 @@ fn initRelocFromObject(rel: macho.relocation_info, context: RelocContext) !Reloc
parsed_rel.where = .local;
parsed_rel.where_index = where_index;
} else {
- const n_strx = context.macho_file.strtab_dir.getAdapted(@as([]const u8, sym_name), MachO.StringSliceAdapter{
- .strtab = &context.macho_file.strtab,
+ const n_strx = context.macho_file.strtab_dir.getKeyAdapted(@as([]const u8, sym_name), StringIndexAdapter{
+ .bytes = &context.macho_file.strtab,
}) orelse unreachable;
const resolv = context.macho_file.symbol_resolver.get(n_strx) orelse unreachable;
switch (resolv.where) {
@@ -756,8 +757,8 @@ pub fn parseRelocs(self: *Atom, relocs: []macho.relocation_info, context: RelocC
const where_index = context.object.symbol_mapping.get(rel.r_symbolnum) orelse unreachable;
subtractor = where_index;
} else {
- const n_strx = context.macho_file.strtab_dir.getAdapted(@as([]const u8, sym_name), MachO.StringSliceAdapter{
- .strtab = &context.macho_file.strtab,
+ const n_strx = context.macho_file.strtab_dir.getKeyAdapted(@as([]const u8, sym_name), StringIndexAdapter{
+ .bytes = &context.macho_file.strtab,
}) orelse unreachable;
const resolv = context.macho_file.symbol_resolver.get(n_strx) orelse unreachable;
assert(resolv.where == .global);
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 450d842134..a8c0138f60 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -96,7 +96,7 @@ const abbrev_parameter = 6;
/// The reloc offset for the virtual address of a function in its Line Number Program.
/// Size is a virtual address integer.
const dbg_line_vaddr_reloc_index = 3;
-/// The reloc offset for the virtual address of a function in its .debug_info TAG_subprogram.
+/// The reloc offset for the virtual address of a function in its .debug_info TAG.subprogram.
/// Size is a virtual address integer.
const dbg_info_low_pc_reloc_index = 1;
@@ -281,40 +281,40 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// These are LEB encoded but since the values are all less than 127
// we can simply append these bytes.
const abbrev_buf = [_]u8{
- abbrev_compile_unit, DW.TAG_compile_unit, DW.CHILDREN_yes, // header
- DW.AT_stmt_list, DW.FORM_sec_offset, // offset
- DW.AT_low_pc, DW.FORM_addr,
- DW.AT_high_pc, DW.FORM_addr,
- DW.AT_name, DW.FORM_strp,
- DW.AT_comp_dir, DW.FORM_strp,
- DW.AT_producer, DW.FORM_strp,
- DW.AT_language, DW.FORM_data2,
+ abbrev_compile_unit, DW.TAG.compile_unit, DW.CHILDREN.yes, // header
+ DW.AT.stmt_list, DW.FORM.sec_offset, // offset
+ DW.AT.low_pc, DW.FORM.addr,
+ DW.AT.high_pc, DW.FORM.addr,
+ DW.AT.name, DW.FORM.strp,
+ DW.AT.comp_dir, DW.FORM.strp,
+ DW.AT.producer, DW.FORM.strp,
+ DW.AT.language, DW.FORM.data2,
0, 0, // table sentinel
- abbrev_subprogram, DW.TAG_subprogram, DW.CHILDREN_yes, // header
- DW.AT_low_pc, DW.FORM_addr, // start VM address
- DW.AT_high_pc, DW.FORM_data4,
- DW.AT_type, DW.FORM_ref4,
- DW.AT_name, DW.FORM_string,
- DW.AT_decl_line, DW.FORM_data4,
- DW.AT_decl_file, DW.FORM_data1,
+ abbrev_subprogram, DW.TAG.subprogram, DW.CHILDREN.yes, // header
+ DW.AT.low_pc, DW.FORM.addr, // start VM address
+ DW.AT.high_pc, DW.FORM.data4,
+ DW.AT.type, DW.FORM.ref4,
+ DW.AT.name, DW.FORM.string,
+ DW.AT.decl_line, DW.FORM.data4,
+ DW.AT.decl_file, DW.FORM.data1,
0, 0, // table sentinel
abbrev_subprogram_retvoid,
- DW.TAG_subprogram, DW.CHILDREN_yes, // header
- DW.AT_low_pc, DW.FORM_addr,
- DW.AT_high_pc, DW.FORM_data4,
- DW.AT_name, DW.FORM_string,
- DW.AT_decl_line, DW.FORM_data4,
- DW.AT_decl_file, DW.FORM_data1,
+ DW.TAG.subprogram, DW.CHILDREN.yes, // header
+ DW.AT.low_pc, DW.FORM.addr,
+ DW.AT.high_pc, DW.FORM.data4,
+ DW.AT.name, DW.FORM.string,
+ DW.AT.decl_line, DW.FORM.data4,
+ DW.AT.decl_file, DW.FORM.data1,
0, 0, // table sentinel
- abbrev_base_type, DW.TAG_base_type, DW.CHILDREN_no, // header
- DW.AT_encoding, DW.FORM_data1, DW.AT_byte_size,
- DW.FORM_data1, DW.AT_name, DW.FORM_string,
+ abbrev_base_type, DW.TAG.base_type, DW.CHILDREN.no, // header
+ DW.AT.encoding, DW.FORM.data1, DW.AT.byte_size,
+ DW.FORM.data1, DW.AT.name, DW.FORM.string,
0, 0, // table sentinel
- abbrev_pad1, DW.TAG_unspecified_type, DW.CHILDREN_no, // header
+ abbrev_pad1, DW.TAG.unspecified_type, DW.CHILDREN.no, // header
0, 0, // table sentinel
- abbrev_parameter, DW.TAG_formal_parameter, DW.CHILDREN_no, // header
- DW.AT_location, DW.FORM_exprloc, DW.AT_type,
- DW.FORM_ref4, DW.AT_name, DW.FORM_string,
+ abbrev_parameter, DW.TAG.formal_parameter, DW.CHILDREN.no, // header
+ DW.AT.location, DW.FORM.exprloc, DW.AT.type,
+ DW.FORM.ref4, DW.AT.name, DW.FORM.string,
0, 0, // table sentinel
0, 0, 0, // section sentinel
};
@@ -379,7 +379,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
const high_pc = text_section.addr + text_section.size;
di_buf.appendAssumeCapacity(abbrev_compile_unit);
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT_stmt_list, DW.FORM_sec_offset
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT.stmt_list, DW.FORM.sec_offset
mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), low_pc);
mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), high_pc);
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, name_strp));
@@ -388,7 +388,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// We are still waiting on dwarf-std.org to assign DW_LANG_Zig a number:
// http://dwarfstd.org/ShowIssue.php?issue=171115.1
// Until then we say it is C99.
- mem.writeIntLittle(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG_C99);
+ mem.writeIntLittle(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG.C99);
if (di_buf.items.len > first_dbg_info_decl.dbg_info_off) {
// Move the first N decls to the end to make more padding for the header.
@@ -496,7 +496,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
di_buf.items.len += @sizeOf(u32); // We will come back and write this.
const after_header_len = di_buf.items.len;
- const opcode_base = DW.LNS_set_isa + 1;
+ const opcode_base = DW.LNS.set_isa + 1;
di_buf.appendSliceAssumeCapacity(&[_]u8{
1, // minimum_instruction_length
1, // maximum_operations_per_instruction
@@ -507,18 +507,18 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// Standard opcode lengths. The number of items here is based on `opcode_base`.
// The value is the number of LEB128 operands the instruction takes.
- 0, // `DW.LNS_copy`
- 1, // `DW.LNS_advance_pc`
- 1, // `DW.LNS_advance_line`
- 1, // `DW.LNS_set_file`
- 1, // `DW.LNS_set_column`
- 0, // `DW.LNS_negate_stmt`
- 0, // `DW.LNS_set_basic_block`
- 0, // `DW.LNS_const_add_pc`
- 1, // `DW.LNS_fixed_advance_pc`
- 0, // `DW.LNS_set_prologue_end`
- 0, // `DW.LNS_set_epilogue_begin`
- 1, // `DW.LNS_set_isa`
+ 0, // `DW.LNS.copy`
+ 1, // `DW.LNS.advance_pc`
+ 1, // `DW.LNS.advance_line`
+ 1, // `DW.LNS.set_file`
+ 1, // `DW.LNS.set_column`
+ 0, // `DW.LNS.negate_stmt`
+ 0, // `DW.LNS.set_basic_block`
+ 0, // `DW.LNS.const_add_pc`
+ 1, // `DW.LNS.fixed_advance_pc`
+ 0, // `DW.LNS.set_prologue_end`
+ 0, // `DW.LNS.set_epilogue_begin`
+ 1, // `DW.LNS.set_isa`
0, // include_directories (none except the compilation unit cwd)
});
// file_names[0]
@@ -861,22 +861,22 @@ pub fn initDeclDebugBuffers(
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
- DW.LNS_extended_op,
+ DW.LNS.extended_op,
@sizeOf(u64) + 1,
- DW.LNE_set_address,
+ DW.LNE.set_address,
});
// This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len);
dbg_line_buffer.items.len += @sizeOf(u64);
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line);
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS.advance_line);
// This is the "relocatable" relative line offset from the previous function's end curly
// to this function's begin curly.
assert(getRelocDbgLineOff() == dbg_line_buffer.items.len);
// Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later.
leb.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off);
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file);
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_file);
assert(getRelocDbgFileIndex() == dbg_line_buffer.items.len);
// Once we support more than one source file, this will have the ability to be more
// than one possible value.
@@ -885,7 +885,7 @@ pub fn initDeclDebugBuffers(
// Emit a line for the begin curly with prologue_end=false. The codegen will
// do the work of setting prologue_end=true and epilogue_begin=true.
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy);
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy);
// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
@@ -902,9 +902,9 @@ pub fn initDeclDebugBuffers(
// "relocations" and have to be in this fixed place so that functions can be
// moved in virtual address space.
assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len);
- dbg_info_buffer.items.len += @sizeOf(u64); // DW.AT_low_pc, DW.FORM_addr
+ dbg_info_buffer.items.len += @sizeOf(u64); // DW.AT.low_pc, DW.FORM.addr
assert(getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
- dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4
+ dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
if (fn_ret_has_bits) {
const gop = try dbg_info_type_relocs.getOrPut(allocator, fn_ret_type);
if (!gop.found_existing) {
@@ -914,11 +914,11 @@ pub fn initDeclDebugBuffers(
};
}
try gop.value_ptr.relocs.append(allocator, @intCast(u32, dbg_info_buffer.items.len));
- dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
+ dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
- dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
- mem.writeIntLittle(u32, dbg_info_buffer.addManyAsArrayAssumeCapacity(4), line_off + 1); // DW.AT_decl_line, DW.FORM_data4
- dbg_info_buffer.appendAssumeCapacity(file_index); // DW.AT_decl_file, DW.FORM_data1
+ dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT.name, DW.FORM.string
+ mem.writeIntLittle(u32, dbg_info_buffer.addManyAsArrayAssumeCapacity(4), line_off + 1); // DW.AT.decl_line, DW.FORM.data4
+ dbg_info_buffer.appendAssumeCapacity(file_index); // DW.AT.decl_file, DW.FORM.data1
},
else => {
// TODO implement .debug_info for global variables
@@ -970,16 +970,16 @@ pub fn commitDeclDebugInfo(
{
// Advance line and PC.
// TODO encapsulate logic in a helper function.
- try dbg_line_buffer.append(DW.LNS_advance_pc);
+ try dbg_line_buffer.append(DW.LNS.advance_pc);
try leb.writeULEB128(dbg_line_buffer.writer(), text_block.size);
- try dbg_line_buffer.append(DW.LNS_advance_line);
+ try dbg_line_buffer.append(DW.LNS.advance_line);
const func = decl.val.castTag(.function).?.data;
const line_off = @intCast(u28, func.rbrace_line - func.lbrace_line);
try leb.writeULEB128(dbg_line_buffer.writer(), line_off);
}
- try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence });
+ try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence });
// Now we have the full contents and may allocate a region to store it.
@@ -1060,7 +1060,7 @@ pub fn commitDeclDebugInfo(
const file_pos = debug_line_sect.offset + src_fn.off;
try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
- // .debug_info - End the TAG_subprogram children.
+ // .debug_info - End the TAG.subprogram children.
try dbg_info_buffer.append(0);
},
else => {},
@@ -1113,27 +1113,27 @@ fn addDbgInfoType(
.Bool => {
try dbg_info_buffer.appendSlice(&[_]u8{
abbrev_base_type,
- DW.ATE_boolean, // DW.AT_encoding , DW.FORM_data1
- 1, // DW.AT_byte_size, DW.FORM_data1
+ DW.ATE.boolean, // DW.AT.encoding , DW.FORM.data1
+ 1, // DW.AT.byte_size, DW.FORM.data1
'b',
'o',
'o',
'l',
- 0, // DW.AT_name, DW.FORM_string
+ 0, // DW.AT.name, DW.FORM.string
});
},
.Int => {
const info = ty.intInfo(target);
try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
- // DW.AT_encoding, DW.FORM_data1
+ // DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
- .signed => DW.ATE_signed,
- .unsigned => DW.ATE_unsigned,
+ .signed => DW.ATE.signed,
+ .unsigned => DW.ATE.unsigned,
});
- // DW.AT_byte_size, DW.FORM_data1
+ // DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target)));
- // DW.AT_name, DW.FORM_string
+ // DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
},
else => {
@@ -1291,7 +1291,7 @@ fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
const root_src_dir_path_len = if (module.root_pkg.root_src_directory.path) |p| p.len else 1; // "."
return @intCast(u32, 53 + directory_entry_format_count * 2 + file_name_entry_format_count * 2 +
directory_count * 8 + file_name_count * 8 +
- // These are encoded as DW.FORM_string rather than DW.FORM_strp as we would like
+ // These are encoded as DW.FORM.string rather than DW.FORM.strp as we would like
// because of a workaround for readelf and gdb failing to understand DWARFv5 correctly.
root_src_dir_path_len +
module.root_pkg.root_src_path.len);
@@ -1317,8 +1317,8 @@ fn pwriteDbgLineNops(
const tracy = trace(@src());
defer tracy.end();
- const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096;
- const three_byte_nop = [3]u8{ DW.LNS_advance_pc, 0b1000_0000, 0 };
+ const page_of_nops = [1]u8{DW.LNS.negate_stmt} ** 4096;
+ const three_byte_nop = [3]u8{ DW.LNS.advance_pc, 0b1000_0000, 0 };
var vecs: [32]std.os.iovec_const = undefined;
var vec_index: usize = 0;
{
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index b558463cea..a7e312e6c7 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -823,8 +823,8 @@ pub fn parseDebugInfo(self: *Object, allocator: *Allocator) !void {
},
else => |e| return e,
};
- const name = try compile_unit.die.getAttrString(&debug_info.inner, dwarf.AT_name);
- const comp_dir = try compile_unit.die.getAttrString(&debug_info.inner, dwarf.AT_comp_dir);
+ const name = try compile_unit.die.getAttrString(&debug_info.inner, dwarf.AT.name);
+ const comp_dir = try compile_unit.die.getAttrString(&debug_info.inner, dwarf.AT.comp_dir);
self.debug_info = debug_info;
self.tu_name = try allocator.dupe(u8, name);
diff --git a/src/link/tapi/parse/test.zig b/src/link/tapi/parse/test.zig
index b96a71fe97..fc0bed5df6 100644
--- a/src/link/tapi/parse/test.zig
+++ b/src/link/tapi/parse/test.zig
@@ -1,8 +1,8 @@
const std = @import("std");
const mem = std.mem;
const testing = std.testing;
-
-usingnamespace @import("../parse.zig");
+const Tree = @import("../parse.zig").Tree;
+const Node = @import("../parse.zig").Node;
test "explicit doc" {
const source =
diff --git a/src/main.zig b/src/main.zig
index 44b3a0515c..6a76c9507f 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -6,7 +6,7 @@ const mem = std.mem;
const process = std.process;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
-const ast = std.zig.ast;
+const Ast = std.zig.Ast;
const warn = std.log.warn;
const Compilation = @import("Compilation.zig");
@@ -589,6 +589,10 @@ fn buildOutputType(
var linker_bind_global_refs_locally: ?bool = null;
var linker_z_nodelete = false;
var linker_z_defs = false;
+ var linker_z_origin = false;
+ var linker_z_noexecstack = false;
+ var linker_z_now = false;
+ var linker_z_relro = false;
var linker_tsaware = false;
var linker_nxcompat = false;
var linker_dynamicbase = false;
@@ -1393,6 +1397,14 @@ fn buildOutputType(
linker_z_nodelete = true;
} else if (mem.eql(u8, z_arg, "defs")) {
linker_z_defs = true;
+ } else if (mem.eql(u8, z_arg, "origin")) {
+ linker_z_origin = true;
+ } else if (mem.eql(u8, z_arg, "noexecstack")) {
+ linker_z_noexecstack = true;
+ } else if (mem.eql(u8, z_arg, "now")) {
+ linker_z_now = true;
+ } else if (mem.eql(u8, z_arg, "relro")) {
+ linker_z_relro = true;
} else {
warn("unsupported linker arg: -z {s}", .{z_arg});
}
@@ -1581,39 +1593,11 @@ fn buildOutputType(
}
};
- var diags: std.zig.CrossTarget.ParseOptions.Diagnostics = .{};
- const cross_target = std.zig.CrossTarget.parse(.{
+ const cross_target = try parseCrossTargetOrReportFatalError(arena, .{
.arch_os_abi = target_arch_os_abi,
.cpu_features = target_mcpu,
.dynamic_linker = target_dynamic_linker,
- .diagnostics = &diags,
- }) catch |err| switch (err) {
- error.UnknownCpuModel => {
- help: {
- var help_text = std.ArrayList(u8).init(arena);
- for (diags.arch.?.allCpuModels()) |cpu| {
- help_text.writer().print(" {s}\n", .{cpu.name}) catch break :help;
- }
- std.log.info("Available CPUs for architecture '{s}':\n{s}", .{
- @tagName(diags.arch.?), help_text.items,
- });
- }
- fatal("Unknown CPU: '{s}'", .{diags.cpu_name.?});
- },
- error.UnknownCpuFeature => {
- help: {
- var help_text = std.ArrayList(u8).init(arena);
- for (diags.arch.?.allFeaturesList()) |feature| {
- help_text.writer().print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
- }
- std.log.info("Available CPU features for architecture '{s}':\n{s}", .{
- @tagName(diags.arch.?), help_text.items,
- });
- }
- fatal("Unknown CPU feature: '{s}'", .{diags.unknown_feature_name});
- },
- else => |e| return e,
- };
+ });
const target_info = try detectNativeTargetInfo(gpa, cross_target);
@@ -1960,7 +1944,7 @@ fn buildOutputType(
defer if (libc_installation) |*l| l.deinit(gpa);
if (libc_paths_file) |paths_file| {
- libc_installation = LibCInstallation.parse(gpa, paths_file) catch |err| {
+ libc_installation = LibCInstallation.parse(gpa, paths_file, cross_target) catch |err| {
fatal("unable to parse libc paths file at path {s}: {s}", .{ paths_file, @errorName(err) });
};
}
@@ -2077,6 +2061,10 @@ fn buildOutputType(
.linker_bind_global_refs_locally = linker_bind_global_refs_locally,
.linker_z_nodelete = linker_z_nodelete,
.linker_z_defs = linker_z_defs,
+ .linker_z_origin = linker_z_origin,
+ .linker_z_noexecstack = linker_z_noexecstack,
+ .linker_z_now = linker_z_now,
+ .linker_z_relro = linker_z_relro,
.linker_tsaware = linker_tsaware,
.linker_nxcompat = linker_nxcompat,
.linker_dynamicbase = linker_dynamicbase,
@@ -2273,6 +2261,43 @@ fn buildOutputType(
return cleanExit();
}
+fn parseCrossTargetOrReportFatalError(allocator: *Allocator, opts: std.zig.CrossTarget.ParseOptions) !std.zig.CrossTarget {
+ var opts_with_diags = opts;
+ var diags: std.zig.CrossTarget.ParseOptions.Diagnostics = .{};
+ if (opts_with_diags.diagnostics == null) {
+ opts_with_diags.diagnostics = &diags;
+ }
+ return std.zig.CrossTarget.parse(opts_with_diags) catch |err| switch (err) {
+ error.UnknownCpuModel => {
+ help: {
+ var help_text = std.ArrayList(u8).init(allocator);
+ defer help_text.deinit();
+ for (diags.arch.?.allCpuModels()) |cpu| {
+ help_text.writer().print(" {s}\n", .{cpu.name}) catch break :help;
+ }
+ std.log.info("Available CPUs for architecture '{s}':\n{s}", .{
+ @tagName(diags.arch.?), help_text.items,
+ });
+ }
+ fatal("Unknown CPU: '{s}'", .{diags.cpu_name.?});
+ },
+ error.UnknownCpuFeature => {
+ help: {
+ var help_text = std.ArrayList(u8).init(allocator);
+ defer help_text.deinit();
+ for (diags.arch.?.allFeaturesList()) |feature| {
+ help_text.writer().print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
+ }
+ std.log.info("Available CPU features for architecture '{s}':\n{s}", .{
+ @tagName(diags.arch.?), help_text.items,
+ });
+ }
+ fatal("Unknown CPU feature: '{s}'", .{diags.unknown_feature_name});
+ },
+ else => |e| return e,
+ };
+}
+
fn runOrTest(
comp: *Compilation,
gpa: *Allocator,
@@ -2614,10 +2639,15 @@ pub const usage_libc =
\\
\\ Parse a libc installation text file and validate it.
\\
+ \\Options:
+ \\ -h, --help Print this help and exit
+ \\ -target [name] <arch><sub>-<os>-<abi> see the targets command
+ \\
;
pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void {
var input_file: ?[]const u8 = null;
+ var target_arch_os_abi: []const u8 = "native";
{
var i: usize = 0;
while (i < args.len) : (i += 1) {
@@ -2627,6 +2657,10 @@ pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void {
const stdout = io.getStdOut().writer();
try stdout.writeAll(usage_libc);
return cleanExit();
+ } else if (mem.eql(u8, arg, "-target")) {
+ if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg});
+ i += 1;
+ target_arch_os_abi = args[i];
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
}
@@ -2637,12 +2671,21 @@ pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void {
}
}
}
+
+ const cross_target = try parseCrossTargetOrReportFatalError(gpa, .{
+ .arch_os_abi = target_arch_os_abi,
+ });
+
if (input_file) |libc_file| {
- var libc = LibCInstallation.parse(gpa, libc_file) catch |err| {
+ var libc = LibCInstallation.parse(gpa, libc_file, cross_target) catch |err| {
fatal("unable to parse libc file at path {s}: {s}", .{ libc_file, @errorName(err) });
};
defer libc.deinit(gpa);
} else {
+ if (!cross_target.isNative()) {
+ fatal("unable to detect libc for non-native target", .{});
+ }
+
var libc = LibCInstallation.findNative(.{
.allocator = gpa,
.verbose = true,
@@ -3423,8 +3466,8 @@ fn fmtPathFile(
fn printErrMsgToStdErr(
gpa: *mem.Allocator,
arena: *mem.Allocator,
- parse_error: ast.Error,
- tree: ast.Tree,
+ parse_error: Ast.Error,
+ tree: Ast,
path: []const u8,
color: Color,
) !void {
@@ -3828,7 +3871,7 @@ fn parseCodeModel(arg: []const u8) std.builtin.CodeModel {
/// garbage collector to run concurrently to zig processes, and to allow multiple
/// zig processes to run concurrently with each other, without clobbering each other.
fn gimmeMoreOfThoseSweetSweetFileDescriptors() void {
- if (!@hasDecl(std.os, "rlimit")) return;
+ if (!@hasDecl(std.os.system, "rlimit")) return;
const posix = std.os;
var lim = posix.getrlimit(.NOFILE) catch return; // Oh well; we tried.
@@ -3836,7 +3879,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void {
// On Darwin, `NOFILE` is bounded by a hardcoded value `OPEN_MAX`.
// According to the man pages for setrlimit():
// setrlimit() now returns with errno set to EINVAL in places that historically succeeded.
- // It no longer accepts "rlim_cur = RLIM_INFINITY" for RLIM_NOFILE.
+ // It no longer accepts "rlim_cur = RLIM.INFINITY" for RLIM.NOFILE.
// Use "rlim_cur = min(OPEN_MAX, rlim_max)".
lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max);
}
@@ -3846,7 +3889,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void {
var min: posix.rlim_t = lim.cur;
var max: posix.rlim_t = 1 << 20;
// But if there's a defined upper bound, don't search, just set it.
- if (lim.max != posix.RLIM_INFINITY) {
+ if (lim.max != posix.RLIM.INFINITY) {
min = lim.max;
max = lim.max;
}
@@ -4029,12 +4072,12 @@ pub fn cmdAstCheck(
}
{
- const token_bytes = @sizeOf(std.zig.ast.TokenList) +
- file.tree.tokens.len * (@sizeOf(std.zig.Token.Tag) + @sizeOf(std.zig.ast.ByteOffset));
- const tree_bytes = @sizeOf(std.zig.ast.Tree) + file.tree.nodes.len *
- (@sizeOf(std.zig.ast.Node.Tag) +
- @sizeOf(std.zig.ast.Node.Data) +
- @sizeOf(std.zig.ast.TokenIndex));
+ const token_bytes = @sizeOf(Ast.TokenList) +
+ file.tree.tokens.len * (@sizeOf(std.zig.Token.Tag) + @sizeOf(Ast.ByteOffset));
+ const tree_bytes = @sizeOf(Ast) + file.tree.nodes.len *
+ (@sizeOf(Ast.Node.Tag) +
+ @sizeOf(Ast.Node.Data) +
+ @sizeOf(Ast.TokenIndex));
const instruction_bytes = file.zir.instructions.len *
// Here we don't use @sizeOf(Zir.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
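On the CLI side, this plumbs the four new -z values through to link.Options (and, via the Elf.zig hunk above, onto the LLD command line), teaches `zig libc` a -target flag, and factors the CPU diagnostics reporting into parseCrossTargetOrReportFatalError so cmdLibC can reuse it. Hypothetical invocations exercising the new paths (file names are placeholders):

    zig build-exe main.zig -z noexecstack -z relro -z now -z origin
    zig libc -target x86_64-windows-gnu my_libc.txt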
diff --git a/src/mingw.zig b/src/mingw.zig
index 529025c517..587f019270 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -1022,6 +1022,10 @@ const mingwex_arm32_src = [_][]const u8{
};
const mingwex_arm64_src = [_][]const u8{
+ "misc" ++ path.sep_str ++ "initenv.c",
+ "math" ++ path.sep_str ++ "arm-common" ++ path.sep_str ++ "log2.c",
+ "math" ++ path.sep_str ++ "arm-common" ++ path.sep_str ++ "pow.c",
+ "math" ++ path.sep_str ++ "arm-common" ++ path.sep_str ++ "scalbn.c",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "_chgsignl.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "rint.c",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "rintf.c",
diff --git a/src/print_air.zig b/src/print_air.zig
index 276158f720..0106d0a0f3 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -109,6 +109,7 @@ const Writer = struct {
.mul,
.mulwrap,
.div,
+ .rem,
.ptr_add,
.ptr_sub,
.bit_and,
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index dfebc66cfd..4004199eb6 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -1125,6 +1125,7 @@ struct AstNodeContainerInitExpr {
struct AstNodeIdentifier {
Buf *name;
+ bool is_at_syntax;
};
struct AstNodeEnumLiteral {
@@ -1801,6 +1802,10 @@ enum BuiltinFnId {
BuiltinFnIdReduce,
BuiltinFnIdMaximum,
BuiltinFnIdMinimum,
+ BuiltinFnIdSatAdd,
+ BuiltinFnIdSatSub,
+ BuiltinFnIdSatMul,
+ BuiltinFnIdSatShl,
};
struct BuiltinFnEntry {
@@ -2945,6 +2950,10 @@ enum IrBinOp {
IrBinOpArrayMult,
IrBinOpMaximum,
IrBinOpMinimum,
+ IrBinOpSatAdd,
+ IrBinOpSatSub,
+ IrBinOpSatMul,
+ IrBinOpSatShl,
};
struct Stage1ZirInstBinOp {
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index 402c86d86f..2eb609ef1a 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -3918,12 +3918,6 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) {
add_error_note(g, msg, other_tld->source_node, buf_sprintf("previous definition here"));
return;
}
-
- ZigType *type;
- if (get_primitive_type(g, tld->name, &type) != ErrorPrimitiveTypeNotFound) {
- add_node_error(g, tld->source_node,
- buf_sprintf("declaration shadows primitive type '%s'", buf_ptr(tld->name)));
- }
}
}
@@ -4170,13 +4164,6 @@ ZigVar *add_variable(CodeGen *g, AstNode *source_node, Scope *parent_scope, Buf
variable_entry->var_type = g->builtin_types.entry_invalid;
} else {
variable_entry->align_bytes = get_abi_alignment(g, var_type);
-
- ZigType *type;
- if (get_primitive_type(g, name, &type) != ErrorPrimitiveTypeNotFound) {
- add_node_error(g, source_node,
- buf_sprintf("variable shadows primitive type '%s'", buf_ptr(name)));
- variable_entry->var_type = g->builtin_types.entry_invalid;
- }
}
Scope *child_scope;
diff --git a/src/stage1/astgen.cpp b/src/stage1/astgen.cpp
index 86c18abc1e..9e5d9da9ee 100644
--- a/src/stage1/astgen.cpp
+++ b/src/stage1/astgen.cpp
@@ -3194,13 +3194,6 @@ ZigVar *create_local_var(CodeGen *codegen, AstNode *node, Scope *parent_scope,
add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration here"));
}
variable_entry->var_type = codegen->builtin_types.entry_invalid;
- } else {
- ZigType *type;
- if (get_primitive_type(codegen, name, &type) != ErrorPrimitiveTypeNotFound) {
- add_node_error(codegen, node,
- buf_sprintf("variable shadows primitive type '%s'", buf_ptr(name)));
- variable_entry->var_type = codegen->builtin_types.entry_invalid;
- }
}
}
} else {
@@ -3815,35 +3808,38 @@ static Stage1ZirInst *astgen_identifier(Stage1AstGen *ag, Scope *scope, AstNode
Error err;
assert(node->type == NodeTypeIdentifier);
- Buf *variable_name = node_identifier_buf(node);
-
- if (buf_eql_str(variable_name, "_")) {
- if (lval == LValAssign) {
- Stage1ZirInstConst *const_instruction = ir_build_instruction<Stage1ZirInstConst>(ag, scope, node);
- const_instruction->value = ag->codegen->pass1_arena->create<ZigValue>();
- const_instruction->value->type = get_pointer_to_type(ag->codegen,
- ag->codegen->builtin_types.entry_void, false);
- const_instruction->value->special = ConstValSpecialStatic;
- const_instruction->value->data.x_ptr.special = ConstPtrSpecialDiscard;
- return &const_instruction->base;
+ bool is_at_syntax;
+ Buf *variable_name = node_identifier_buf2(node, &is_at_syntax);
+
+ if (!is_at_syntax) {
+ if (buf_eql_str(variable_name, "_")) {
+ if (lval == LValAssign) {
+ Stage1ZirInstConst *const_instruction = ir_build_instruction<Stage1ZirInstConst>(ag, scope, node);
+ const_instruction->value = ag->codegen->pass1_arena->create<ZigValue>();
+ const_instruction->value->type = get_pointer_to_type(ag->codegen,
+ ag->codegen->builtin_types.entry_void, false);
+ const_instruction->value->special = ConstValSpecialStatic;
+ const_instruction->value->data.x_ptr.special = ConstPtrSpecialDiscard;
+ return &const_instruction->base;
+ }
}
- }
- ZigType *primitive_type;
- if ((err = get_primitive_type(ag->codegen, variable_name, &primitive_type))) {
- if (err == ErrorOverflow) {
- add_node_error(ag->codegen, node,
- buf_sprintf("primitive integer type '%s' exceeds maximum bit width of 65535",
- buf_ptr(variable_name)));
- return ag->codegen->invalid_inst_src;
- }
- assert(err == ErrorPrimitiveTypeNotFound);
- } else {
- Stage1ZirInst *value = ir_build_const_type(ag, scope, node, primitive_type);
- if (lval == LValPtr || lval == LValAssign) {
- return ir_build_ref_src(ag, scope, node, value);
+ ZigType *primitive_type;
+ if ((err = get_primitive_type(ag->codegen, variable_name, &primitive_type))) {
+ if (err == ErrorOverflow) {
+ add_node_error(ag->codegen, node,
+ buf_sprintf("primitive integer type '%s' exceeds maximum bit width of 65535",
+ buf_ptr(variable_name)));
+ return ag->codegen->invalid_inst_src;
+ }
+ assert(err == ErrorPrimitiveTypeNotFound);
} else {
- return ir_expr_wrap(ag, scope, value, result_loc);
+ Stage1ZirInst *value = ir_build_const_type(ag, scope, node, primitive_type);
+ if (lval == LValPtr || lval == LValAssign) {
+ return ir_build_ref_src(ag, scope, node, value);
+ } else {
+ return ir_expr_wrap(ag, scope, value, result_loc);
+ }
}
}
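
For context on what the is_at_syntax flag buys: only bare identifiers go through the primitive-type lookup, so an @"..."-quoted name can now coexist with the primitive type of the same spelling. A minimal Zig sketch of the intended behavior (the declaration names are hypothetical, not taken from this change):

    const @"u8" = 300;     // an ordinary constant whose name happens to be "u8"
    const byte: u8 = 255;  // a bare "u8" still resolves to the primitive type
    comptime {
        _ = @"u8";         // the quoted spelling refers to the constant, not the type
        _ = byte;
    }
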
@@ -4708,6 +4704,66 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast
Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMaximum, arg0_value, arg1_value, true);
return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
}
+ case BuiltinFnIdSatAdd:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
+ if (arg0_value == ag->codegen->invalid_inst_src)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
+ if (arg1_value == ag->codegen->invalid_inst_src)
+ return arg1_value;
+
+ Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatAdd, arg0_value, arg1_value, true);
+ return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
+ }
+ case BuiltinFnIdSatSub:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
+ if (arg0_value == ag->codegen->invalid_inst_src)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
+ if (arg1_value == ag->codegen->invalid_inst_src)
+ return arg1_value;
+
+ Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatSub, arg0_value, arg1_value, true);
+ return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
+ }
+ case BuiltinFnIdSatMul:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
+ if (arg0_value == ag->codegen->invalid_inst_src)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
+ if (arg1_value == ag->codegen->invalid_inst_src)
+ return arg1_value;
+
+ Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatMul, arg0_value, arg1_value, true);
+ return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
+ }
+ case BuiltinFnIdSatShl:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
+ if (arg0_value == ag->codegen->invalid_inst_src)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
+ if (arg1_value == ag->codegen->invalid_inst_src)
+ return arg1_value;
+
+ Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatShl, arg0_value, arg1_value, true);
+ return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
+ }
case BuiltinFnIdMemcpy:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
diff --git a/src/stage1/bigint.cpp b/src/stage1/bigint.cpp
index 5c8efad698..3a7f10e699 100644
--- a/src/stage1/bigint.cpp
+++ b/src/stage1/bigint.cpp
@@ -468,6 +468,84 @@ void bigint_min(BigInt* dest, const BigInt *op1, const BigInt *op2) {
}
}
+/// clamps dest in place within bit_count/signedness boundaries
+/// signed bounds are [-2^(bit_count-1)..2^(bit_count-1)-1]
+/// unsigned bounds are [0..2^bit_count-1]
+void bigint_clamp_by_bitcount(BigInt* dest, uint32_t bit_count, bool is_signed) {
+ // compute the number of bits required to store the value, and use that
+ // to decide whether to clamp the result
+ bool is_negative = dest->is_negative;
+ // To work around the fact that this bits_needed calculation would yield 65 or more
+ // for all negative numbers, set is_negative to false. This is a cheap way to find
+ // bits_needed(abs(dest)).
+ dest->is_negative = false;
+ // Because we've set is_negative to false, account for the extra bit here by
+ // adding one more needed bit when (is_negative && !is_signed).
+ size_t full_bits = dest->digit_count * 64;
+ size_t leading_zero_count = bigint_clz(dest, full_bits);
+ size_t bits_needed = full_bits - leading_zero_count + (is_negative && !is_signed);
+
+ bit_count -= is_signed;
+ if(bits_needed > bit_count) {
+ BigInt one;
+ bigint_init_unsigned(&one, 1);
+ BigInt bit_count_big;
+ bigint_init_unsigned(&bit_count_big, bit_count);
+
+ if(is_signed) {
+ if(is_negative) {
+ BigInt bound;
+ bigint_shl(&bound, &one, &bit_count_big);
+ bigint_deinit(dest);
+ *dest = bound;
+ } else {
+ BigInt bound;
+ bigint_shl(&bound, &one, &bit_count_big);
+ BigInt bound_sub_one;
+ bigint_sub(&bound_sub_one, &bound, &one);
+ bigint_deinit(&bound);
+ bigint_deinit(dest);
+ *dest = bound_sub_one;
+ }
+ } else {
+ if(is_negative) {
+ bigint_deinit(dest);
+ bigint_init_unsigned(dest, 0);
+ return; // skips setting is_negative which would be invalid
+ } else {
+ BigInt bound;
+ bigint_shl(&bound, &one, &bit_count_big);
+ BigInt bound_sub_one;
+ bigint_sub(&bound_sub_one, &bound, &one);
+ bigint_deinit(&bound);
+ bigint_deinit(dest);
+ *dest = bound_sub_one;
+ }
+ }
+ }
+ dest->is_negative = is_negative;
+}
+
+void bigint_add_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed) {
+ bigint_add(dest, op1, op2);
+ bigint_clamp_by_bitcount(dest, bit_count, is_signed);
+}
+
+void bigint_sub_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed) {
+ bigint_sub(dest, op1, op2);
+ bigint_clamp_by_bitcount(dest, bit_count, is_signed);
+}
+
+void bigint_mul_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed) {
+ bigint_mul(dest, op1, op2);
+ bigint_clamp_by_bitcount(dest, bit_count, is_signed);
+}
+
+void bigint_shl_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed) {
+ bigint_shl(dest, op1, op2);
+ bigint_clamp_by_bitcount(dest, bit_count, is_signed);
+}
+
void bigint_add(BigInt *dest, const BigInt *op1, const BigInt *op2) {
if (op1->digit_count == 0) {
return bigint_init_bigint(dest, op2);
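
A worked example of the clamp bounds documented above: with bit_count = 8, the unsigned range is [0, 255] and the signed range is [-128, 127], so an unsigned saturating add of 200 + 100 = 300 clamps to 255, a signed 100 + 100 = 200 clamps to 127, and a signed (-100) + (-100) = -200 clamps to -128.
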
diff --git a/src/stage1/bigint.hpp b/src/stage1/bigint.hpp
index 53e07f9284..7d30fb1689 100644
--- a/src/stage1/bigint.hpp
+++ b/src/stage1/bigint.hpp
@@ -105,4 +105,8 @@ bool mul_u64_overflow(uint64_t op1, uint64_t op2, uint64_t *result);
uint32_t bigint_hash(BigInt const *x);
bool bigint_eql(BigInt const *a, BigInt const *b);
+void bigint_add_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed);
+void bigint_sub_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed);
+void bigint_mul_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed);
+void bigint_shl_sat(BigInt* dest, const BigInt *op1, const BigInt *op2, uint32_t bit_count, bool is_signed);
#endif
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index c44081c770..614ed8e26c 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -3335,6 +3335,46 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
} else {
zig_unreachable();
}
+ case IrBinOpSatAdd:
+ if (scalar_type->id == ZigTypeIdInt) {
+ if (scalar_type->data.integral.is_signed) {
+ return ZigLLVMBuildSAddSat(g->builder, op1_value, op2_value, "");
+ } else {
+ return ZigLLVMBuildUAddSat(g->builder, op1_value, op2_value, "");
+ }
+ } else {
+ zig_unreachable();
+ }
+ case IrBinOpSatSub:
+ if (scalar_type->id == ZigTypeIdInt) {
+ if (scalar_type->data.integral.is_signed) {
+ return ZigLLVMBuildSSubSat(g->builder, op1_value, op2_value, "");
+ } else {
+ return ZigLLVMBuildUSubSat(g->builder, op1_value, op2_value, "");
+ }
+ } else {
+ zig_unreachable();
+ }
+ case IrBinOpSatMul:
+ if (scalar_type->id == ZigTypeIdInt) {
+ if (scalar_type->data.integral.is_signed) {
+ return ZigLLVMBuildSMulFixSat(g->builder, op1_value, op2_value, "");
+ } else {
+ return ZigLLVMBuildUMulFixSat(g->builder, op1_value, op2_value, "");
+ }
+ } else {
+ zig_unreachable();
+ }
+ case IrBinOpSatShl:
+ if (scalar_type->id == ZigTypeIdInt) {
+ if (scalar_type->data.integral.is_signed) {
+ return ZigLLVMBuildSShlSat(g->builder, op1_value, op2_value, "");
+ } else {
+ return ZigLLVMBuildUShlSat(g->builder, op1_value, op2_value, "");
+ }
+ } else {
+ zig_unreachable();
+ }
}
zig_unreachable();
}
@@ -9096,6 +9136,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdReduce, "reduce", 2);
create_builtin_fn(g, BuiltinFnIdMaximum, "maximum", 2);
create_builtin_fn(g, BuiltinFnIdMinimum, "minimum", 2);
+ create_builtin_fn(g, BuiltinFnIdSatAdd, "addWithSaturation", 2);
+ create_builtin_fn(g, BuiltinFnIdSatSub, "subWithSaturation", 2);
+ create_builtin_fn(g, BuiltinFnIdSatMul, "mulWithSaturation", 2);
+ create_builtin_fn(g, BuiltinFnIdSatShl, "shlWithSaturation", 2);
}
static const char *bool_to_str(bool b) {
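
The four registrations above give stage1 @addWithSaturation, @subWithSaturation, @mulWithSaturation, and @shlWithSaturation, each taking two arguments. A rough usage sketch, assuming ordinary saturating semantics on the operand type (the values below are illustrative, not taken from the test suite):

    const std = @import("std");

    test "saturating builtins on u8 operands" {
        var a: u8 = 200;
        var b: u8 = 100;
        try std.testing.expectEqual(@as(u8, 255), @addWithSaturation(a, b)); // 300 saturates to 255
        try std.testing.expectEqual(@as(u8, 0), @subWithSaturation(b, a));   // -100 saturates to 0
        try std.testing.expectEqual(@as(u8, 255), @mulWithSaturation(a, b)); // 20000 saturates to 255
        try std.testing.expectEqual(@as(u8, 255), @shlWithSaturation(a, 2)); // 800 saturates to 255
    }
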
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 830ce76708..0604c05c46 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -9820,6 +9820,34 @@ static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, Scope *scope, AstNode *s
float_min(out_val, op1_val, op2_val);
}
break;
+ case IrBinOpSatAdd:
+ if (is_int) {
+ bigint_add_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
+ } else {
+ zig_unreachable();
+ }
+ break;
+ case IrBinOpSatSub:
+ if (is_int) {
+ bigint_sub_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
+ } else {
+ zig_unreachable();
+ }
+ break;
+ case IrBinOpSatMul:
+ if (is_int) {
+ bigint_mul_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
+ } else {
+ zig_unreachable();
+ }
+ break;
+ case IrBinOpSatShl:
+ if (is_int) {
+ bigint_shl_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
+ } else {
+ zig_unreachable();
+ }
+ break;
}
if (type_entry->id == ZigTypeIdInt) {
@@ -10041,6 +10069,10 @@ static bool ok_float_op(IrBinOp op) {
case IrBinOpBitShiftRightExact:
case IrBinOpAddWrap:
case IrBinOpSubWrap:
+ case IrBinOpSatAdd:
+ case IrBinOpSatSub:
+ case IrBinOpSatMul:
+ case IrBinOpSatShl:
case IrBinOpMultWrap:
case IrBinOpArrayCat:
case IrBinOpArrayMult:
@@ -11014,6 +11046,10 @@ static Stage1AirInst *ir_analyze_instruction_bin_op(IrAnalyze *ira, Stage1ZirIns
case IrBinOpRemMod:
case IrBinOpMaximum:
case IrBinOpMinimum:
+ case IrBinOpSatAdd:
+ case IrBinOpSatSub:
+ case IrBinOpSatMul:
+ case IrBinOpSatShl:
return ir_analyze_bin_op_math(ira, bin_op_instruction);
case IrBinOpArrayCat:
return ir_analyze_array_cat(ira, bin_op_instruction);
diff --git a/src/stage1/ir_print.cpp b/src/stage1/ir_print.cpp
index 96e924b768..152221926d 100644
--- a/src/stage1/ir_print.cpp
+++ b/src/stage1/ir_print.cpp
@@ -737,6 +737,14 @@ static const char *ir_bin_op_id_str(IrBinOp op_id) {
return "@maximum";
case IrBinOpMinimum:
return "@minimum";
+ case IrBinOpSatAdd:
+ return "@addWithSaturation";
+ case IrBinOpSatSub:
+ return "@subWithSaturation";
+ case IrBinOpSatMul:
+ return "@mulWithSaturation";
+ case IrBinOpSatShl:
+ return "@shlWithSaturation";
}
zig_unreachable();
}
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index 9a429364b1..b06a944172 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -3482,8 +3482,7 @@ Error source_char_literal(const char *source, uint32_t *result, size_t *bad_inde
}
}
-
-Buf *token_identifier_buf(RootStruct *root_struct, TokenIndex token) {
+static Buf *token_identifier_buf2(RootStruct *root_struct, TokenIndex token, bool *is_at_syntax) {
Error err;
const char *source = buf_ptr(root_struct->source_code);
size_t byte_offset = root_struct->token_locs[token].offset;
@@ -3495,6 +3494,7 @@ Buf *token_identifier_buf(RootStruct *root_struct, TokenIndex token) {
assert(source[byte_offset] != '.'); // wrong token index
if (source[byte_offset] == '@') {
+ *is_at_syntax = true;
size_t bad_index;
Buf *str = buf_alloc();
if ((err = source_string_literal_buf(source + byte_offset + 1, str, &bad_index))) {
@@ -3503,6 +3503,7 @@ Buf *token_identifier_buf(RootStruct *root_struct, TokenIndex token) {
}
return str;
} else {
+ *is_at_syntax = false;
size_t start = byte_offset;
for (;; byte_offset += 1) {
if (source[byte_offset] == 0) break;
@@ -3519,7 +3520,17 @@ Buf *token_identifier_buf(RootStruct *root_struct, TokenIndex token) {
}
}
+Buf *token_identifier_buf(RootStruct *root_struct, TokenIndex token) {
+ bool trash;
+ return token_identifier_buf2(root_struct, token, &trash);
+}
+
Buf *node_identifier_buf(AstNode *node) {
+ bool trash;
+ return node_identifier_buf2(node, &trash);
+}
+
+Buf *node_identifier_buf2(AstNode *node, bool *is_at_syntax) {
assert(node->type == NodeTypeIdentifier);
// Currently, stage1 runs astgen for every comptime function call,
// resulting the allocation here wasting memory. As a workaround until
@@ -3527,8 +3538,10 @@ Buf *node_identifier_buf(AstNode *node) {
// we memoize the result into the AST here.
if (node->data.identifier.name == nullptr) {
RootStruct *root_struct = node->owner->data.structure.root_struct;
- node->data.identifier.name = token_identifier_buf(root_struct, node->main_token);
+ node->data.identifier.name = token_identifier_buf2(root_struct, node->main_token,
+ &node->data.identifier.is_at_syntax);
}
+ *is_at_syntax = node->data.identifier.is_at_syntax;
return node->data.identifier.name;
}
diff --git a/src/stage1/parser.hpp b/src/stage1/parser.hpp
index 9f73444cb8..8ac8ce6de1 100644
--- a/src/stage1/parser.hpp
+++ b/src/stage1/parser.hpp
@@ -19,6 +19,7 @@ void ast_print(AstNode *node, int indent);
void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *context), void *context);
Buf *node_identifier_buf(AstNode *node);
+Buf *node_identifier_buf2(AstNode *node, bool *is_at_syntax);
Buf *token_identifier_buf(RootStruct *root_struct, TokenIndex token);
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 0cc40cdfd4..d980fa657e 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -203,9 +203,7 @@ const Scope = struct {
/// Check if the global scope contains this name, without looking into the "future", e.g.
/// ignore the preprocessed decl and macro names.
fn containsNow(scope: *Root, name: []const u8) bool {
- return isZigPrimitiveType(name) or
- scope.sym_table.contains(name) or
- scope.macro_table.contains(name);
+ return scope.sym_table.contains(name) or scope.macro_table.contains(name);
}
/// Check if the global scope contains the name, includes all decls that haven't been translated yet.
@@ -358,7 +356,7 @@ pub fn translate(
args_end: [*]?[*]const u8,
errors: *[]ClangErrMsg,
resources_path: [*:0]const u8,
-) !std.zig.ast.Tree {
+) !std.zig.Ast {
const ast_unit = clang.LoadFromCommandLine(
args_begin,
args_end,
@@ -371,7 +369,7 @@ pub fn translate(
};
defer ast_unit.delete();
- // For memory that has the same lifetime as the Tree that we return
+ // For memory that has the same lifetime as the Ast that we return
// from this function.
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
@@ -397,7 +395,15 @@ pub fn translate(
context.pattern_list.deinit(gpa);
}
- try context.global_scope.nodes.append(Tag.usingnamespace_builtins.init());
+ inline for (meta.declarations(std.zig.c_builtins)) |decl| {
+ if (decl.is_pub) {
+ const builtin = try Tag.pub_var_simple.create(context.arena, .{
+ .name = decl.name,
+ .init = try Tag.import_c_builtin.create(context.arena, decl.name),
+ });
+ try addTopLevelDecl(&context, decl.name, builtin);
+ }
+ }
try prepopulateGlobalNameTable(ast_unit, &context);
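
In place of the old blanket pub usingnamespace of std.zig.c_builtins, this loop emits one aliasing declaration per public builtin, rendered through the import_c_builtin node added in translate_c/ast.zig as, for example, pub const __builtin_abs = @import("std").zig.c_builtins.__builtin_abs; (the particular builtin name here is illustrative).
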
@@ -495,19 +501,17 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
},
else => return,
} else unreachable;
- // TODO https://github.com/ziglang/zig/issues/3756
- // TODO https://github.com/ziglang/zig/issues/1802
- const name = if (isZigPrimitiveType(decl_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ decl_name, c.getMangle() }) else decl_name;
+
const result = try c.unnamed_typedefs.getOrPut(c.gpa, addr);
if (result.found_existing) {
// One typedef can declare multiple names.
// Don't put this one in `decl_table` so it's processed later.
return;
}
- result.value_ptr.* = name;
+ result.value_ptr.* = decl_name;
// Put this typedef in the decl_table to avoid redefinitions.
- try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), name);
- try c.typedefs.put(c.gpa, name, {});
+ try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), decl_name);
+ try c.typedefs.put(c.gpa, decl_name, {});
}
}
}
@@ -752,10 +756,6 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
const is_pub = mangled_name == null;
const is_threadlocal = var_decl.getTLSKind() != .None;
const scope = &c.global_scope.base;
-
- // TODO https://github.com/ziglang/zig/issues/3756
- // TODO https://github.com/ziglang/zig/issues/1802
- const checked_name = if (isZigPrimitiveType(var_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ var_name, c.getMangle() }) else var_name;
const var_decl_loc = var_decl.getLocation();
const qual_type = var_decl.getTypeSourceInfo_getType();
@@ -774,7 +774,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
const type_node = transQualTypeMaybeInitialized(c, scope, qual_type, decl_init, var_decl_loc) catch |err| switch (err) {
error.UnsupportedTranslation, error.UnsupportedType => {
- return failDecl(c, var_decl_loc, checked_name, "unable to resolve variable type", .{});
+ return failDecl(c, var_decl_loc, var_name, "unable to resolve variable type", .{});
},
error.OutOfMemory => |e| return e,
};
@@ -833,11 +833,11 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
.is_threadlocal = is_threadlocal,
.linksection_string = linksection_string,
.alignment = zigAlignment(var_decl.getAlignedAttribute(c.clang_context)),
- .name = checked_name,
+ .name = var_name,
.type = type_node,
.init = init_node,
});
- return addTopLevelDecl(c, checked_name, node);
+ return addTopLevelDecl(c, var_name, node);
}
const builtin_typedef_map = std.ComptimeStringMap([]const u8, .{
@@ -861,11 +861,7 @@ fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNa
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
- const bare_name = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin());
-
- // TODO https://github.com/ziglang/zig/issues/3756
- // TODO https://github.com/ziglang/zig/issues/1802
- var name: []const u8 = if (isZigPrimitiveType(bare_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ bare_name, c.getMangle() }) else bare_name;
+ var name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin());
try c.typedefs.put(c.gpa, name, {});
if (builtin_typedef_map.get(name)) |builtin| {
@@ -1535,12 +1531,12 @@ fn transOffsetOfExpr(
/// node -> @bitCast(usize, @intCast(isize, node))
fn usizeCastForWrappingPtrArithmetic(gpa: *mem.Allocator, node: Node) TransError!Node {
const intcast_node = try Tag.int_cast.create(gpa, .{
- .lhs = try Tag.identifier.create(gpa, "isize"),
+ .lhs = try Tag.type.create(gpa, "isize"),
.rhs = node,
});
return Tag.bit_cast.create(gpa, .{
- .lhs = try Tag.identifier.create(gpa, "usize"),
+ .lhs = try Tag.type.create(gpa, "usize"),
.rhs = intcast_node,
});
}
@@ -1994,6 +1990,7 @@ fn transImplicitCastExpr(
fn isBuiltinDefined(name: []const u8) bool {
inline for (meta.declarations(std.zig.c_builtins)) |decl| {
+ if (!decl.is_pub) continue;
if (std.mem.eql(u8, name, decl.name)) return true;
}
return false;
@@ -3345,7 +3342,7 @@ fn transSignedArrayAccess(
const then_value = try Tag.add.create(c.arena, .{
.lhs = container_node,
.rhs = try Tag.int_cast.create(c.arena, .{
- .lhs = try Tag.identifier.create(c.arena, "usize"),
+ .lhs = try Tag.type.create(c.arena, "usize"),
.rhs = tmp_ref,
}),
});
@@ -3357,7 +3354,7 @@ fn transSignedArrayAccess(
const minuend = container_node;
const signed_size = try Tag.int_cast.create(c.arena, .{
- .lhs = try Tag.identifier.create(c.arena, "isize"),
+ .lhs = try Tag.type.create(c.arena, "isize"),
.rhs = tmp_ref,
});
const to_cast = try Tag.add_wrap.create(c.arena, .{
@@ -3365,7 +3362,7 @@ fn transSignedArrayAccess(
.rhs = try Tag.negate.create(c.arena, Tag.one_literal.init()),
});
const bitcast_node = try Tag.bit_cast.create(c.arena, .{
- .lhs = try Tag.identifier.create(c.arena, "usize"),
+ .lhs = try Tag.type.create(c.arena, "usize"),
.rhs = to_cast,
});
const subtrahend = try Tag.bit_not.create(c.arena, bitcast_node);
@@ -3421,7 +3418,7 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip
const container_node = try transExpr(c, scope, unwrapped_base, .used);
const rhs = if (is_longlong or is_signed) blk: {
// check if long long first so that signed long long doesn't just become unsigned long long
- const typeid_node = if (is_longlong) try Tag.identifier.create(c.arena, "usize") else try transQualTypeIntWidthOf(c, subscr_qt, false);
+ const typeid_node = if (is_longlong) try Tag.type.create(c.arena, "usize") else try transQualTypeIntWidthOf(c, subscr_qt, false);
break :blk try Tag.int_cast.create(c.arena, .{ .lhs = typeid_node, .rhs = try transExpr(c, scope, subscr_expr, .used) });
} else try transExpr(c, scope, subscr_expr, .used);
@@ -3953,7 +3950,7 @@ fn transFloatingLiteral(c: *Context, scope: *Scope, expr: *const clang.FloatingL
fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node {
// GNU extension of the ternary operator where the middle expression is
- // omitted, the conditition itself is returned if it evaluates to true
+ // omitted, the condition itself is returned if it evaluates to true
const qt = @ptrCast(*const clang.Expr, stmt).getType();
const res_is_bool = qualTypeIsBoolean(qt);
const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt);
@@ -4040,7 +4037,7 @@ fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.Condi
.then = then_body,
.@"else" = else_body,
});
- // Clang inserts ImplicitCast(ToVoid)'s to both rhs and lhs so we don't need to supress the result here.
+ // Clang inserts ImplicitCast(ToVoid)'s to both rhs and lhs so we don't need to suppress the result here.
return if_node;
}
@@ -4671,6 +4668,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
if (@ptrCast(*const clang.Decl, typedef_decl).castToNamedDecl()) |named_decl| {
const decl_name = try c.str(named_decl.getName_bytes_begin());
if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
+ if (builtin_typedef_map.get(decl_name)) |builtin| return Tag.type.create(c.arena, builtin);
}
try transTypeDef(c, trans_scope, typedef_decl);
const name = c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl())).?;
@@ -4994,19 +4992,6 @@ pub fn freeErrors(errors: []ClangErrMsg) void {
errors.ptr.delete(errors.len);
}
-fn isZigPrimitiveType(name: []const u8) bool {
- if (name.len > 1 and (name[0] == 'u' or name[0] == 'i')) {
- for (name[1..]) |c| {
- switch (c) {
- '0'...'9' => {},
- else => return false,
- }
- }
- return true;
- }
- return @import("AstGen.zig").simple_types.has(name);
-}
-
const PatternList = struct {
patterns: []Pattern,
@@ -5272,6 +5257,26 @@ const MacroCtx = struct {
fn makeSlicer(self: *const MacroCtx) MacroSlicer {
return MacroSlicer{ .source = self.source, .tokens = self.list };
}
+
+ fn containsUndefinedIdentifier(self: *MacroCtx, scope: *Scope, params: []const ast.Payload.Param) ?[]const u8 {
+ const slicer = self.makeSlicer();
+ var i: usize = 1; // index 0 is the macro name
+ while (i < self.list.len) : (i += 1) {
+ const token = self.list[i];
+ switch (token.id) {
+ .Period, .Arrow => i += 1, // skip next token since field identifiers can be unknown
+ .Identifier => {
+ const identifier = slicer.slice(token);
+ const is_param = for (params) |param| {
+ if (param.name != null and mem.eql(u8, identifier, param.name.?)) break true;
+ } else false;
+ if (!scope.contains(identifier) and !isBuiltinDefined(identifier) and !is_param) return identifier;
+ },
+ else => {},
+ }
+ }
+ return null;
+ }
};
fn tokenizeMacro(source: []const u8, tok_list: *std.ArrayList(CToken)) Error!void {
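
As a hypothetical illustration of containsUndefinedIdentifier: a macro such as #define ADD_BAR(x) ((x) + bar) now fails translation with "undefined identifier `bar`" unless bar is a known global, a macro parameter, or a std.zig.c_builtins declaration, while #define GET_Y(p) ((p)->y) is still accepted because the token after -> (or .) is skipped as a field name.
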
@@ -5311,10 +5316,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
const end_loc = clang.Lexer.getLocForEndOfToken(macro.getSourceRange_getEnd(), c.source_manager, unit);
const name = try c.str(raw_name);
- // TODO https://github.com/ziglang/zig/issues/3756
- // TODO https://github.com/ziglang/zig/issues/1802
- const mangled_name = if (isZigPrimitiveType(name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ name, c.getMangle() }) else name;
- if (scope.containsNow(mangled_name)) {
+ if (scope.containsNow(name)) {
continue;
}
@@ -5328,7 +5330,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
var macro_ctx = MacroCtx{
.source = slice,
.list = tok_list.items,
- .name = mangled_name,
+ .name = name,
.loc = begin_loc,
};
assert(mem.eql(u8, macro_ctx.slice(), name));
@@ -5345,7 +5347,10 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
},
.Nl, .Eof => {
// this means it is a macro without a value
- // we don't care about such things
+ // We define it as an empty string so that it can still be used with ++
+ const str_node = try Tag.string_literal.create(c.arena, "\"\"");
+ const var_decl = try Tag.pub_var_simple.create(c.arena, .{ .name = name, .init = str_node });
+ try c.global_scope.macro_table.put(name, var_decl);
continue;
},
.LParen => {
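
For example, a value-less macro such as #define FOO (name hypothetical) now translates to pub const FOO = ""; so a use like FOO ++ "bar" keeps compiling, rather than the macro being silently dropped as before.
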
@@ -5371,6 +5376,9 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
fn transMacroDefine(c: *Context, m: *MacroCtx) ParseError!void {
const scope = &c.global_scope.base;
+ if (m.containsUndefinedIdentifier(scope, &.{})) |ident|
+ return m.fail(c, "unable to translate macro: undefined identifier `{s}`", .{ident});
+
const init_node = try parseCExpr(c, m, scope);
const last = m.next().?;
if (last != .Eof and last != .Nl)
@@ -5421,6 +5429,9 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
return m.fail(c, "unable to translate C expr: expected ')'", .{});
}
+ if (m.containsUndefinedIdentifier(scope, fn_params.items)) |ident|
+ return m.fail(c, "unable to translate macro: undefined identifier `{s}`", .{ident});
+
const expr = try parseCExpr(c, m, scope);
const last = m.next().?;
if (last != .Eof and last != .Nl)
@@ -5762,11 +5773,8 @@ fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!N
},
.Identifier => {
const mangled_name = scope.getAlias(slice);
- if (mem.startsWith(u8, mangled_name, "__builtin_") and !isBuiltinDefined(mangled_name)) {
- try m.fail(c, "TODO implement function '{s}' in std.zig.c_builtins", .{mangled_name});
- return error.ParseError;
- }
- const identifier = try Tag.identifier.create(c.arena, builtin_typedef_map.get(mangled_name) orelse mangled_name);
+ if (builtin_typedef_map.get(mangled_name)) |ty| return Tag.type.create(c.arena, ty);
+ const identifier = try Tag.identifier.create(c.arena, mangled_name);
scope.skipVariableDiscard(identifier.castTag(.identifier).?.data);
return identifier;
},
@@ -6055,7 +6063,8 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_
.Identifier => {
const mangled_name = scope.getAlias(m.slice());
if (!allow_fail or c.typedefs.contains(mangled_name)) {
- return try Tag.identifier.create(c.arena, builtin_typedef_map.get(mangled_name) orelse mangled_name);
+ if (builtin_typedef_map.get(mangled_name)) |ty| return try Tag.type.create(c.arena, ty);
+ return try Tag.identifier.create(c.arena, mangled_name);
}
},
.Keyword_void => return try Tag.type.create(c.arena, "c_void"),
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index e1da1c7d05..3686b90bda 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -31,8 +31,6 @@ pub const Node = extern union {
@"anytype",
@"continue",
@"break",
- /// pub usingnamespace @import("std").zig.c_builtins
- usingnamespace_builtins,
// After this, the tag requires a payload.
integer_literal,
@@ -119,6 +117,8 @@ pub const Node = extern union {
ellipsis3,
assign,
+ /// @import("std").zig.c_builtins.<name>
+ import_c_builtin,
log2_int_type,
/// @import("std").math.Log2Int(operand)
std_math_Log2Int,
@@ -224,7 +224,7 @@ pub const Node = extern union {
/// [1]type{val} ** count
array_filler,
- pub const last_no_payload_tag = Tag.usingnamespace_builtins;
+ pub const last_no_payload_tag = Tag.@"break";
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
pub fn Type(comptime t: Tag) type {
@@ -236,7 +236,6 @@ pub const Node = extern union {
.true_literal,
.false_literal,
.empty_block,
- .usingnamespace_builtins,
.return_void,
.zero_literal,
.one_literal,
@@ -344,6 +343,7 @@ pub const Node = extern union {
.warning,
.type,
.helpers_macro,
+ .import_c_builtin,
=> Payload.Value,
.discard => Payload.Discard,
.@"if" => Payload.If,
@@ -714,9 +714,9 @@ pub const Payload = struct {
};
};
-/// Converts the nodes into a Zig ast.
+/// Converts the nodes into a Zig Ast.
/// Caller must free the source slice.
-pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.ast.Tree {
+pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.Ast {
var ctx = Context{
.gpa = gpa,
.buf = std.ArrayList(u8).init(gpa),
@@ -767,7 +767,7 @@ pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.ast.Tree {
.start = @intCast(u32, ctx.buf.items.len),
});
- return std.zig.ast.Tree{
+ return std.zig.Ast{
.source = try ctx.buf.toOwnedSliceSentinel(0),
.tokens = ctx.tokens.toOwnedSlice(),
.nodes = ctx.nodes.toOwnedSlice(),
@@ -776,17 +776,17 @@ pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.ast.Tree {
};
}
-const NodeIndex = std.zig.ast.Node.Index;
-const NodeSubRange = std.zig.ast.Node.SubRange;
-const TokenIndex = std.zig.ast.TokenIndex;
+const NodeIndex = std.zig.Ast.Node.Index;
+const NodeSubRange = std.zig.Ast.Node.SubRange;
+const TokenIndex = std.zig.Ast.TokenIndex;
const TokenTag = std.zig.Token.Tag;
const Context = struct {
gpa: *Allocator,
buf: std.ArrayList(u8) = .{},
- nodes: std.zig.ast.NodeList = .{},
- extra_data: std.ArrayListUnmanaged(std.zig.ast.Node.Index) = .{},
- tokens: std.zig.ast.TokenList = .{},
+ nodes: std.zig.Ast.NodeList = .{},
+ extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .{},
+ tokens: std.zig.Ast.TokenList = .{},
fn addTokenFmt(c: *Context, tag: TokenTag, comptime format: []const u8, args: anytype) Allocator.Error!TokenIndex {
const start_index = c.buf.items.len;
@@ -801,11 +801,26 @@ const Context = struct {
}
fn addToken(c: *Context, tag: TokenTag, bytes: []const u8) Allocator.Error!TokenIndex {
- return addTokenFmt(c, tag, "{s}", .{bytes});
+ return c.addTokenFmt(tag, "{s}", .{bytes});
+ }
+
+ fn isZigPrimitiveType(name: []const u8) bool {
+ if (name.len > 1 and (name[0] == 'u' or name[0] == 'i')) {
+ for (name[1..]) |c| {
+ switch (c) {
+ '0'...'9' => {},
+ else => return false,
+ }
+ }
+ return true;
+ }
+ return @import("../AstGen.zig").simple_types.has(name);
}
fn addIdentifier(c: *Context, bytes: []const u8) Allocator.Error!TokenIndex {
- return addTokenFmt(c, .identifier, "{s}", .{std.zig.fmtId(bytes)});
+ if (isZigPrimitiveType(bytes))
+ return c.addTokenFmt(.identifier, "@\"{s}\"", .{bytes});
+ return c.addTokenFmt(.identifier, "{s}", .{std.zig.fmtId(bytes)});
}
fn listToSpan(c: *Context, list: []const NodeIndex) Allocator.Error!NodeSubRange {
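
Concretely, with the hunk above an identifier whose name collides with a Zig primitive type, say a C typedef or field named u8 (hypothetical), is rendered as @"u8", while a non-colliding name such as foo is still emitted through std.zig.fmtId as before; this replaces the old translate_c.zig scheme of mangling colliding names to u8_0, u8_1, and so on.
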
@@ -816,7 +831,7 @@ const Context = struct {
};
}
- fn addNode(c: *Context, elem: std.zig.ast.NodeList.Elem) Allocator.Error!NodeIndex {
+ fn addNode(c: *Context, elem: std.zig.Ast.NodeList.Elem) Allocator.Error!NodeIndex {
const result = @intCast(NodeIndex, c.nodes.len);
try c.nodes.append(c.gpa, elem);
return result;
@@ -856,22 +871,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
try c.buf.append('\n');
return @as(NodeIndex, 0); // error: integer value 0 cannot be coerced to type 'std.mem.Allocator.Error!u32'
},
- .usingnamespace_builtins => {
- // pub usingnamespace @import("std").c.builtins;
- _ = try c.addToken(.keyword_pub, "pub");
- const usingnamespace_token = try c.addToken(.keyword_usingnamespace, "usingnamespace");
- const import_node = try renderStdImport(c, &.{ "zig", "c_builtins" });
- _ = try c.addToken(.semicolon, ";");
-
- return c.addNode(.{
- .tag = .@"usingnamespace",
- .main_token = usingnamespace_token,
- .data = .{
- .lhs = import_node,
- .rhs = undefined,
- },
- });
- },
.std_math_Log2Int => {
const payload = node.castTag(.std_math_Log2Int).?.data;
const import_node = try renderStdImport(c, &.{ "math", "Log2Int" });
@@ -928,23 +927,23 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
return renderCall(c, lhs, payload.args);
},
.null_literal => return c.addNode(.{
- .tag = .null_literal,
- .main_token = try c.addToken(.keyword_null, "null"),
+ .tag = .identifier,
+ .main_token = try c.addToken(.identifier, "null"),
.data = undefined,
}),
.undefined_literal => return c.addNode(.{
- .tag = .undefined_literal,
- .main_token = try c.addToken(.keyword_undefined, "undefined"),
+ .tag = .identifier,
+ .main_token = try c.addToken(.identifier, "undefined"),
.data = undefined,
}),
.true_literal => return c.addNode(.{
- .tag = .true_literal,
- .main_token = try c.addToken(.keyword_true, "true"),
+ .tag = .identifier,
+ .main_token = try c.addToken(.identifier, "true"),
.data = undefined,
}),
.false_literal => return c.addNode(.{
- .tag = .false_literal,
- .main_token = try c.addToken(.keyword_false, "false"),
+ .tag = .identifier,
+ .main_token = try c.addToken(.identifier, "false"),
.data = undefined,
}),
.zero_literal => return c.addNode(.{
@@ -1128,6 +1127,15 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
};
return renderStdImport(c, &chain);
},
+ .import_c_builtin => {
+ const payload = node.castTag(.import_c_builtin).?.data;
+ const chain = [_][]const u8{
+ "zig",
+ "c_builtins",
+ payload,
+ };
+ return renderStdImport(c, &chain);
+ },
.string_slice => {
const payload = node.castTag(.string_slice).?.data;
@@ -1151,7 +1159,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
.main_token = l_bracket,
.data = .{
.lhs = string,
- .rhs = try c.addExtra(std.zig.ast.Node.Slice{
+ .rhs = try c.addExtra(std.zig.Ast.Node.Slice{
.start = start,
.end = end,
}),
@@ -1586,7 +1594,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
.main_token = while_tok,
.data = .{
.lhs = cond,
- .rhs = try c.addExtra(std.zig.ast.Node.WhileCont{
+ .rhs = try c.addExtra(std.zig.Ast.Node.WhileCont{
.cont_expr = cont_expr,
.then_expr = body,
}),
@@ -1599,8 +1607,8 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const while_tok = try c.addToken(.keyword_while, "while");
_ = try c.addToken(.l_paren, "(");
const cond = try c.addNode(.{
- .tag = .true_literal,
- .main_token = try c.addToken(.keyword_true, "true"),
+ .tag = .identifier,
+ .main_token = try c.addToken(.identifier, "true"),
.data = undefined,
});
_ = try c.addToken(.r_paren, ")");
@@ -1639,7 +1647,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
.main_token = if_tok,
.data = .{
.lhs = cond,
- .rhs = try c.addExtra(std.zig.ast.Node.If{
+ .rhs = try c.addExtra(std.zig.Ast.Node.If{
.then_expr = then_expr,
.else_expr = else_expr,
}),
@@ -1999,7 +2007,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
members[1] = 0;
for (payload.fields) |field, i| {
- const name_tok = try c.addIdentifier(field.name);
+ const name_tok = try c.addTokenFmt(.identifier, "{s}", .{std.zig.fmtId(field.name)});
_ = try c.addToken(.colon, ":");
const type_expr = try renderNode(c, field.type);
@@ -2079,7 +2087,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI
.main_token = try c.addToken(.period, "."),
.data = .{
.lhs = lhs,
- .rhs = try c.addIdentifier(field_name),
+ .rhs = try c.addTokenFmt(.identifier, "{s}", .{std.zig.fmtId(field_name)}),
},
});
}
@@ -2160,7 +2168,7 @@ fn renderNullSentinelArrayType(c: *Context, len: usize, elem_type: Node) !NodeIn
.main_token = l_bracket,
.data = .{
.lhs = len_expr,
- .rhs = try c.addExtra(std.zig.ast.Node.ArrayTypeSentinel{
+ .rhs = try c.addExtra(std.zig.Ast.Node.ArrayTypeSentinel{
.sentinel = sentinel_expr,
.elem_type = elem_type_expr,
}),
@@ -2337,7 +2345,6 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.@"comptime",
.@"defer",
.asm_simple,
- .usingnamespace_builtins,
.while_true,
.if_not_break,
.switch_else,
@@ -2356,6 +2363,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.bit_xor_assign,
.assign,
.helpers_macro,
+ .import_c_builtin,
=> {
// these should never appear in places where grouping might be needed.
unreachable;
@@ -2363,7 +2371,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
}
}
-fn renderPrefixOp(c: *Context, node: Node, tag: std.zig.ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
+fn renderPrefixOp(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
const payload = @fieldParentPtr(Payload.UnOp, "base", node.ptr_otherwise).data;
return c.addNode(.{
.tag = tag,
@@ -2375,7 +2383,7 @@ fn renderPrefixOp(c: *Context, node: Node, tag: std.zig.ast.Node.Tag, tok_tag: T
});
}
-fn renderBinOpGrouped(c: *Context, node: Node, tag: std.zig.ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
+fn renderBinOpGrouped(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
const payload = @fieldParentPtr(Payload.BinOp, "base", node.ptr_otherwise).data;
const lhs = try renderNodeGrouped(c, payload.lhs);
return c.addNode(.{
@@ -2388,7 +2396,7 @@ fn renderBinOpGrouped(c: *Context, node: Node, tag: std.zig.ast.Node.Tag, tok_ta
});
}
-fn renderBinOp(c: *Context, node: Node, tag: std.zig.ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
+fn renderBinOp(c: *Context, node: Node, tag: std.zig.Ast.Node.Tag, tok_tag: TokenTag, bytes: []const u8) !NodeIndex {
const payload = @fieldParentPtr(Payload.BinOp, "base", node.ptr_otherwise).data;
const lhs = try renderNode(c, payload.lhs);
return c.addNode(.{
@@ -2589,7 +2597,7 @@ fn renderVar(c: *Context, node: Node) !NodeIndex {
.tag = .local_var_decl,
.main_token = mut_tok,
.data = .{
- .lhs = try c.addExtra(std.zig.ast.Node.LocalVarDecl{
+ .lhs = try c.addExtra(std.zig.Ast.Node.LocalVarDecl{
.type_node = type_node,
.align_node = align_node,
}),
@@ -2602,7 +2610,7 @@ fn renderVar(c: *Context, node: Node) !NodeIndex {
.tag = .global_var_decl,
.main_token = mut_tok,
.data = .{
- .lhs = try c.addExtra(std.zig.ast.Node.GlobalVarDecl{
+ .lhs = try c.addExtra(std.zig.Ast.Node.GlobalVarDecl{
.type_node = type_node,
.align_node = align_node,
.section_node = section_node,
@@ -2694,7 +2702,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
.tag = .fn_proto_one,
.main_token = fn_token,
.data = .{
- .lhs = try c.addExtra(std.zig.ast.Node.FnProtoOne{
+ .lhs = try c.addExtra(std.zig.Ast.Node.FnProtoOne{
.param = params.items[0],
.align_expr = align_expr,
.section_expr = section_expr,
@@ -2708,7 +2716,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
.tag = .fn_proto,
.main_token = fn_token,
.data = .{
- .lhs = try c.addExtra(std.zig.ast.Node.FnProto{
+ .lhs = try c.addExtra(std.zig.Ast.Node.FnProto{
.params_start = span.start,
.params_end = span.end,
.align_expr = align_expr,
@@ -2766,7 +2774,7 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex {
.tag = .fn_proto_multi,
.main_token = fn_token,
.data = .{
- .lhs = try c.addExtra(std.zig.ast.Node.SubRange{
+ .lhs = try c.addExtra(std.zig.Ast.Node.SubRange{
.start = span.start,
.end = span.end,
}),
diff --git a/src/type.zig b/src/type.zig
index 467e9c931b..b6602ba18d 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -197,7 +197,7 @@ pub const Type = extern union {
/// Prefer `castTag` to this.
pub fn cast(self: Type, comptime T: type) ?*T {
if (@hasField(T, "base_tag")) {
- return base.castTag(T.base_tag);
+ return self.castTag(T.base_tag);
}
if (self.tag_if_small_enough < Tag.no_payload_count) {
return null;
@@ -273,6 +273,15 @@ pub const Type = extern union {
};
}
+ pub const ArrayInfo = struct { elem_type: Type, sentinel: ?Value = null, len: u64 };
+ pub fn arrayInfo(self: Type) ArrayInfo {
+ return .{
+ .len = self.arrayLen(),
+ .sentinel = self.sentinel(),
+ .elem_type = self.elemType(),
+ };
+ }
+
pub fn ptrInfo(self: Type) Payload.Pointer {
switch (self.tag()) {
.single_const_pointer_to_comptime_int => return .{ .data = .{
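
A brief usage sketch of the new helper, assuming ty is an array type: const info = ty.arrayInfo(); bundles into one struct what previously took three calls, with info.len from arrayLen(), info.elem_type from elemType(), and info.sentinel from sentinel() (null when the array has no sentinel).
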
diff --git a/src/value.zig b/src/value.zig
index 5ac9f142c4..8aaf70c428 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -315,7 +315,7 @@ pub const Value = extern union {
/// Prefer `castTag` to this.
pub fn cast(self: Value, comptime T: type) ?*T {
if (@hasField(T, "base_tag")) {
- return base.castTag(T.base_tag);
+ return self.castTag(T.base_tag);
}
if (self.tag_if_small_enough < Tag.no_payload_count) {
return null;
diff --git a/src/windows_sdk.cpp b/src/windows_sdk.cpp
index fd88374311..25620805cd 100644
--- a/src/windows_sdk.cpp
+++ b/src/windows_sdk.cpp
@@ -23,16 +23,19 @@ enum NativeArch {
NativeArchArm,
NativeArchi386,
NativeArchx86_64,
+ NativeArchAarch64,
};
#if defined(_M_ARM) || defined(__arm_)
static const NativeArch native_arch = NativeArchArm;
-#endif
-#if defined(_M_IX86) || defined(__i386__)
+#elif defined(_M_IX86) || defined(__i386__)
static const NativeArch native_arch = NativeArchi386;
-#endif
-#if defined(_M_X64) || defined(__x86_64__)
+#elif defined(_M_X64) || defined(__x86_64__)
static const NativeArch native_arch = NativeArchx86_64;
+#elif defined(_M_ARM64) || defined(__aarch64__)
+static const NativeArch native_arch = NativeArchAarch64;
+#else
+#error unsupported architecture
#endif
void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) {
@@ -116,6 +119,9 @@ static ZigFindWindowsSdkError find_msvc_lib_dir(ZigWindowsSDKPrivate *priv) {
case NativeArchArm:
out_append_ptr += sprintf(out_append_ptr, "arm\\");
break;
+ case NativeArchAarch64:
+ out_append_ptr += sprintf(out_append_ptr, "arm64\\");
+ break;
}
sprintf(tmp_buf, "%s%s", output_path, "vcruntime.lib");
@@ -161,6 +167,9 @@ com_done:;
case NativeArchArm:
tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "arm\\");
break;
+ case NativeArchAarch64:
+ tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "arm64\\");
+ break;
}
char *output_path = strdup(tmp_buf);
@@ -204,6 +213,9 @@ static ZigFindWindowsSdkError find_10_version(ZigWindowsSDKPrivate *priv) {
case NativeArchArm:
option_name = "OptionId.DesktopCPParm";
break;
+ case NativeArchAarch64:
+ option_name = "OptionId.DesktopCPParm64";
+ break;
case NativeArchx86_64:
option_name = "OptionId.DesktopCPPx64";
break;
diff --git a/src/windows_sdk.zig b/src/windows_sdk.zig
index ca69ff4548..3ff53dc2e1 100644
--- a/src/windows_sdk.zig
+++ b/src/windows_sdk.zig
@@ -11,12 +11,17 @@ pub const ZigWindowsSDK = extern struct {
version81_len: usize,
msvc_lib_dir_ptr: ?[*]const u8,
msvc_lib_dir_len: usize,
+
+ pub const find = zig_find_windows_sdk;
+ pub const free = zig_free_windows_sdk;
+
+ pub const FindError = enum(c_int) {
+ None,
+ OutOfMemory,
+ NotFound,
+ PathTooLong,
+ };
+
+ extern fn zig_find_windows_sdk(out_sdk: **ZigWindowsSDK) FindError;
+ extern fn zig_free_windows_sdk(sdk: *ZigWindowsSDK) void;
};
-pub const ZigFindWindowsSdkError = enum(c_int) {
- None,
- OutOfMemory,
- NotFound,
- PathTooLong,
-};
-pub extern fn zig_find_windows_sdk(out_sdk: **ZigWindowsSDK) ZigFindWindowsSdkError;
-pub extern fn zig_free_windows_sdk(sdk: *ZigWindowsSDK) void;
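
With find and free namespaced under ZigWindowsSDK, a call site migrates roughly as sketched below; the import path and the error name returned by the caller are assumptions, only the FindError tags come from the declaration above.

    const ZigWindowsSDK = @import("windows_sdk.zig").ZigWindowsSDK;

    fn detectWindowsSdk() !*ZigWindowsSDK {
        var sdk: *ZigWindowsSDK = undefined;
        switch (ZigWindowsSDK.find(&sdk)) {
            .None => {},
            .OutOfMemory => return error.OutOfMemory,
            .NotFound, .PathTooLong => return error.WindowsSdkNotFound, // hypothetical error name
        }
        // The caller owns the result and should eventually call ZigWindowsSDK.free(sdk).
        return sdk;
    }
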
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index d0bc24ed1b..2089092c7c 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -488,6 +488,58 @@ LLVMValueRef ZigLLVMBuildSMin(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef R
return wrap(call_inst);
}
+LLVMValueRef ZigLLVMBuildSAddSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::sadd_sat, unwrap(LHS), unwrap(RHS), nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildUAddSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::uadd_sat, unwrap(LHS), unwrap(RHS), nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildSSubSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::ssub_sat, unwrap(LHS), unwrap(RHS), nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildUSubSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::usub_sat, unwrap(LHS), unwrap(RHS), nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildSMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ llvm::Type* types[1] = {
+ unwrap(LHS)->getType(),
+ };
+ // pass scale = 0 as third argument
+ llvm::Value* values[3] = {unwrap(LHS), unwrap(RHS), unwrap(B)->getInt32(0)};
+
+ CallInst *call_inst = unwrap(B)->CreateIntrinsic(Intrinsic::smul_fix_sat, types, values, nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildUMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ llvm::Type* types[1] = {
+ unwrap(LHS)->getType(),
+ };
+ // pass scale = 0 as third argument
+ llvm::Value* values[3] = {unwrap(LHS), unwrap(RHS), unwrap(B)->getInt32(0)};
+
+ CallInst *call_inst = unwrap(B)->CreateIntrinsic(Intrinsic::umul_fix_sat, types, values, nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildSShlSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::sshl_sat, unwrap(LHS), unwrap(RHS), nullptr, name);
+ return wrap(call_inst);
+}
+
+LLVMValueRef ZigLLVMBuildUShlSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) {
+ CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::ushl_sat, unwrap(LHS), unwrap(RHS), nullptr, name);
+ return wrap(call_inst);
+}
+
void ZigLLVMFnSetSubprogram(LLVMValueRef fn, ZigLLVMDISubprogram *subprogram) {
assert( isa<Function>(unwrap(fn)) );
Function *unwrapped_function = reinterpret_cast<Function*>(unwrap(fn));
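
A note on the two fixed-point wrappers above: LLVM does not provide a plain saturating-multiply intrinsic, so ZigLLVMBuildSMulFixSat and ZigLLVMBuildUMulFixSat go through llvm.smul.fix.sat and llvm.umul.fix.sat, which compute sat((a * b) >> scale); with the scale argument fixed at 0, as passed here, that reduces to an ordinary saturating multiply, sat(a * b).
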
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index f49c2662c6..91407b7f12 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -136,6 +136,15 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUMax(LLVMBuilderRef builder, LLVMValueRef
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUMin(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSMax(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSMin(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUAddSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSAddSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUSubSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSSubSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUShlSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSShlSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name);
+
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCmpXchg(LLVMBuilderRef builder, LLVMValueRef ptr, LLVMValueRef cmp,
LLVMValueRef new_val, LLVMAtomicOrdering success_ordering,