Date: Thu, 31 May 2018 10:56:59 -0400
Subject: use * for pointer type instead of &
See #770
To help automatically translate code, see the
zig-fmt-pointer-reform-2 branch.
This will convert all & into *. Due to the syntax
ambiguity (which is why we are making this change),
even address-of & will turn into *, so you'll have
to manually fix these instances. You will be guaranteed
to get compile errors for them - expected 'type', found 'foo'
---
build.zig | 14 +-
doc/docgen.zig | 22 +-
doc/langref.html.in | 212 ++++-----
example/cat/main.zig | 2 +-
example/hello_world/hello_libc.zig | 2 +-
example/mix_o_files/base64.zig | 2 +-
example/mix_o_files/build.zig | 2 +-
example/shared_library/build.zig | 2 +-
src-self-hosted/arg.zig | 12 +-
src-self-hosted/errmsg.zig | 14 +-
src-self-hosted/introspect.zig | 6 +-
src-self-hosted/ir.zig | 2 +-
src-self-hosted/main.zig | 36 +-
src-self-hosted/module.zig | 30 +-
src-self-hosted/scope.zig | 2 +-
src-self-hosted/target.zig | 10 +-
src/all_types.hpp | 31 +-
src/analyze.cpp | 8 +-
src/ast_render.cpp | 31 +-
src/codegen.cpp | 2 +-
src/ir.cpp | 89 ++--
src/ir_print.cpp | 6 +-
src/parser.cpp | 41 +-
src/translate_c.cpp | 33 +-
std/array_list.zig | 46 +-
std/atomic/queue.zig | 32 +-
std/atomic/stack.zig | 36 +-
std/base64.zig | 12 +-
std/buf_map.zig | 18 +-
std/buf_set.zig | 18 +-
std/buffer.zig | 40 +-
std/build.zig | 278 ++++++------
std/c/darwin.zig | 8 +-
std/c/index.zig | 72 ++--
std/c/linux.zig | 4 +-
std/c/windows.zig | 2 +-
std/crypto/blake2.zig | 16 +-
std/crypto/md5.zig | 8 +-
std/crypto/sha1.zig | 8 +-
std/crypto/sha2.zig | 16 +-
std/crypto/sha3.zig | 6 +-
std/crypto/throughput_test.zig | 4 +-
std/cstr.zig | 26 +-
std/debug/failing_allocator.zig | 10 +-
std/debug/index.zig | 106 ++---
std/elf.zig | 18 +-
std/event.zig | 34 +-
std/fmt/errol/index.zig | 14 +-
std/fmt/index.zig | 8 +-
std/hash/adler.zig | 4 +-
std/hash/crc.zig | 8 +-
std/hash/fnv.zig | 4 +-
std/hash/siphash.zig | 8 +-
std/hash_map.zig | 36 +-
std/heap.zig | 82 ++--
std/io.zig | 80 ++--
std/json.zig | 36 +-
std/linked_list.zig | 32 +-
std/macho.zig | 16 +-
std/math/complex/atan.zig | 4 +-
std/math/complex/cosh.zig | 4 +-
std/math/complex/exp.zig | 4 +-
std/math/complex/index.zig | 14 +-
std/math/complex/ldexp.zig | 8 +-
std/math/complex/pow.zig | 2 +-
std/math/complex/sinh.zig | 4 +-
std/math/complex/sqrt.zig | 4 +-
std/math/complex/tanh.zig | 4 +-
std/math/hypot.zig | 2 +-
std/math/index.zig | 4 +-
std/mem.zig | 48 +--
std/net.zig | 8 +-
std/os/child_process.zig | 62 +--
std/os/darwin.zig | 64 +--
std/os/file.zig | 32 +-
std/os/get_user_id.zig | 8 +-
std/os/index.zig | 164 +++----
std/os/linux/index.zig | 174 ++++----
std/os/linux/vdso.zig | 36 +-
std/os/linux/x86_64.zig | 8 +-
std/os/path.zig | 22 +-
std/os/test.zig | 2 +-
std/os/time.zig | 6 +-
std/os/windows/index.zig | 96 ++---
std/os/windows/util.zig | 12 +-
std/os/zen.zig | 20 +-
std/rand/index.zig | 46 +-
std/rand/ziggurat.zig | 10 +-
std/segmented_list.zig | 54 +--
std/sort.zig | 54 +--
std/special/bootstrap.zig | 20 +-
std/special/build_file_template.zig | 4 +-
std/special/build_runner.zig | 6 +-
std/special/builtin.zig | 8 +-
std/special/compiler_rt/index.zig | 4 +-
std/special/compiler_rt/udivmod.zig | 18 +-
std/special/compiler_rt/udivmoddi4.zig | 2 +-
std/special/compiler_rt/udivmodti4.zig | 4 +-
std/special/compiler_rt/udivti3.zig | 2 +-
std/special/compiler_rt/umodti3.zig | 2 +-
std/special/panic.zig | 2 +-
std/unicode.zig | 6 +-
std/zig/ast.zig | 570 ++++++++++++-------------
std/zig/bench.zig | 6 +-
std/zig/parse.zig | 156 +++----
std/zig/parser_test.zig | 2 +-
std/zig/render.zig | 56 +--
std/zig/tokenizer.zig | 8 +-
test/assemble_and_link.zig | 2 +-
test/build_examples.zig | 2 +-
test/cases/align.zig | 56 +--
test/cases/atomics.zig | 12 +-
test/cases/bugs/655.zig | 4 +-
test/cases/bugs/828.zig | 6 +-
test/cases/bugs/920.zig | 6 +-
test/cases/cast.zig | 42 +-
test/cases/const_slice_child.zig | 6 +-
test/cases/coroutines.zig | 6 +-
test/cases/enum.zig | 10 +-
test/cases/enum_with_members.zig | 2 +-
test/cases/eval.zig | 12 +-
test/cases/field_parent_ptr.zig | 4 +-
test/cases/fn_in_struct_in_comptime.zig | 6 +-
test/cases/generics.zig | 8 +-
test/cases/incomplete_struct_param_tld.zig | 4 +-
test/cases/math.zig | 18 +-
test/cases/misc.zig | 48 +--
test/cases/null.zig | 2 +-
test/cases/reflection.zig | 2 +-
test/cases/slice.zig | 2 +-
test/cases/struct.zig | 28 +-
test/cases/struct_contains_null_ptr_itself.zig | 4 +-
test/cases/switch.zig | 2 +-
test/cases/this.zig | 2 +-
test/cases/type_info.zig | 16 +-
test/cases/undefined.zig | 4 +-
test/cases/union.zig | 16 +-
test/compare_output.zig | 20 +-
test/compile_errors.zig | 122 +++---
test/gen_h.zig | 2 +-
test/runtime_safety.zig | 2 +-
test/standalone/brace_expansion/build.zig | 2 +-
test/standalone/brace_expansion/main.zig | 8 +-
test/standalone/issue_339/build.zig | 2 +-
test/standalone/issue_339/test.zig | 2 +-
test/standalone/issue_794/build.zig | 2 +-
test/standalone/pkg_import/build.zig | 2 +-
test/standalone/use_alias/build.zig | 2 +-
test/tests.zig | 136 +++---
test/translate_c.zig | 58 +--
150 files changed, 2162 insertions(+), 2143 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/build.zig b/build.zig
index a4e3dbcdfa..109a799ac9 100644
--- a/build.zig
+++ b/build.zig
@@ -10,7 +10,7 @@ const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const io = std.io;
-pub fn build(b: &Builder) !void {
+pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
@@ -132,7 +132,7 @@ pub fn build(b: &Builder) !void {
test_step.dependOn(tests.addGenHTests(b, test_filter));
}
-fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) void {
+fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
@@ -147,7 +147,7 @@ fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) vo
}
}
-fn addCppLib(b: &Builder, lib_exe_obj: &std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
+fn addCppLib(b: *Builder, lib_exe_obj: *std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
@@ -159,7 +159,7 @@ const LibraryDep = struct {
includes: ArrayList([]const u8),
};
-fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
+fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
const libs_output = try b.exec([][]const u8{
llvm_config_exe,
"--libs",
@@ -217,7 +217,7 @@ fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
return result;
}
-pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
+pub fn installStdLib(b: *Builder, stdlib_files: []const u8) void {
var it = mem.split(stdlib_files, ";");
while (it.next()) |stdlib_file| {
const src_path = os.path.join(b.allocator, "std", stdlib_file) catch unreachable;
@@ -226,7 +226,7 @@ pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
}
}
-pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
+pub fn installCHeaders(b: *Builder, c_header_files: []const u8) void {
var it = mem.split(c_header_files, ";");
while (it.next()) |c_header_file| {
const src_path = os.path.join(b.allocator, "c_headers", c_header_file) catch unreachable;
@@ -235,7 +235,7 @@ pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
}
}
-fn nextValue(index: &usize, build_info: []const u8) []const u8 {
+fn nextValue(index: *usize, build_info: []const u8) []const u8 {
const start = index.*;
while (true) : (index.* += 1) {
switch (build_info[index.*]) {
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 7dc444f127..fed4bb8eba 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -104,7 +104,7 @@ const Tokenizer = struct {
};
}
- fn next(self: &Tokenizer) Token {
+ fn next(self: *Tokenizer) Token {
var result = Token{
.id = Token.Id.Eof,
.start = self.index,
@@ -196,7 +196,7 @@ const Tokenizer = struct {
line_end: usize,
};
- fn getTokenLocation(self: &Tokenizer, token: &const Token) Location {
+ fn getTokenLocation(self: *Tokenizer, token: *const Token) Location {
var loc = Location{
.line = 0,
.column = 0,
@@ -221,7 +221,7 @@ const Tokenizer = struct {
}
};
-fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const u8, args: ...) error {
+fn parseError(tokenizer: *Tokenizer, token: *const Token, comptime fmt: []const u8, args: ...) error {
const loc = tokenizer.getTokenLocation(token);
warn("{}:{}:{}: error: " ++ fmt ++ "\n", tokenizer.source_file_name, loc.line + 1, loc.column + 1, args);
if (loc.line_start <= loc.line_end) {
@@ -244,13 +244,13 @@ fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const
return error.ParseError;
}
-fn assertToken(tokenizer: &Tokenizer, token: &const Token, id: Token.Id) !void {
+fn assertToken(tokenizer: *Tokenizer, token: *const Token, id: Token.Id) !void {
if (token.id != id) {
return parseError(tokenizer, token, "expected {}, found {}", @tagName(id), @tagName(token.id));
}
}
-fn eatToken(tokenizer: &Tokenizer, id: Token.Id) !Token {
+fn eatToken(tokenizer: *Tokenizer, id: Token.Id) !Token {
const token = tokenizer.next();
try assertToken(tokenizer, token, id);
return token;
@@ -317,7 +317,7 @@ const Action = enum {
Close,
};
-fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
+fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
var urls = std.HashMap([]const u8, Token, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator);
errdefer urls.deinit();
@@ -546,7 +546,7 @@ fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
};
}
-fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
+fn urlize(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -566,7 +566,7 @@ fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
-fn escapeHtml(allocator: &mem.Allocator, input: []const u8) ![]u8 {
+fn escapeHtml(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -608,7 +608,7 @@ test "term color" {
assert(mem.eql(u8, result, "AgreenB"));
}
-fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
+fn termColor(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -688,7 +688,7 @@ fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
-fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var, zig_exe: []const u8) !void {
+fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void {
var code_progress_index: usize = 0;
for (toc.nodes) |node| {
switch (node) {
@@ -1036,7 +1036,7 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
}
}
-fn exec(allocator: &mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
+fn exec(allocator: *mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
diff --git a/doc/langref.html.in b/doc/langref.html.in
index d63c38d0fe..3bd1124e00 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -458,7 +458,7 @@ test "string literals" {
// A C string literal is a null terminated pointer.
const null_terminated_bytes = c"hello";
- assert(@typeOf(null_terminated_bytes) == &const u8);
+ assert(@typeOf(null_terminated_bytes) == *const u8);
assert(null_terminated_bytes[5] == 0);
}
{#code_end#}
@@ -547,7 +547,7 @@ const c_string_literal =
;
{#code_end#}
- In this example the variable c_string_literal has type &const char and
+ In this example the variable c_string_literal has type *const char and
has a terminating null byte.
{#see_also|@embedFile#}
@@ -1403,12 +1403,12 @@ test "address of syntax" {
assert(x_ptr.* == 1234);
// When you get the address of a const variable, you get a const pointer.
- assert(@typeOf(x_ptr) == &const i32);
+ assert(@typeOf(x_ptr) == *const i32);
// If you want to mutate the value, you'd need an address of a mutable variable:
var y: i32 = 5678;
const y_ptr = &y;
- assert(@typeOf(y_ptr) == &i32);
+ assert(@typeOf(y_ptr) == *i32);
y_ptr.* += 1;
assert(y_ptr.* == 5679);
}
@@ -1455,7 +1455,7 @@ comptime {
test "@ptrToInt and @intToPtr" {
// To convert an integer address into a pointer, use @intToPtr:
- const ptr = @intToPtr(&i32, 0xdeadbeef);
+ const ptr = @intToPtr(*i32, 0xdeadbeef);
// To convert a pointer to an integer, use @ptrToInt:
const addr = @ptrToInt(ptr);
@@ -1467,7 +1467,7 @@ test "@ptrToInt and @intToPtr" {
comptime {
// Zig is able to do this at compile-time, as long as
// ptr is never dereferenced.
- const ptr = @intToPtr(&i32, 0xdeadbeef);
+ const ptr = @intToPtr(*i32, 0xdeadbeef);
const addr = @ptrToInt(ptr);
assert(@typeOf(addr) == usize);
assert(addr == 0xdeadbeef);
@@ -1477,17 +1477,17 @@ test "volatile" {
// In Zig, loads and stores are assumed to not have side effects.
// If a given load or store should have side effects, such as
// Memory Mapped Input/Output (MMIO), use `volatile`:
- const mmio_ptr = @intToPtr(&volatile u8, 0x12345678);
+ const mmio_ptr = @intToPtr(*volatile u8, 0x12345678);
// Now loads and stores with mmio_ptr are guaranteed to all happen
// and in the same order as in source code.
- assert(@typeOf(mmio_ptr) == &volatile u8);
+ assert(@typeOf(mmio_ptr) == *volatile u8);
}
test "nullable pointers" {
// Pointers cannot be null. If you want a null pointer, use the nullable
// prefix `?` to make the pointer type nullable.
- var ptr: ?&i32 = null;
+ var ptr: ?*i32 = null;
var x: i32 = 1;
ptr = &x;
@@ -1496,7 +1496,7 @@ test "nullable pointers" {
// Nullable pointers are the same size as normal pointers, because pointer
// value 0 is used as the null value.
- assert(@sizeOf(?&i32) == @sizeOf(&i32));
+ assert(@sizeOf(?*i32) == @sizeOf(*i32));
}
test "pointer casting" {
@@ -1504,7 +1504,7 @@ test "pointer casting" {
// operation that Zig cannot protect you against. Use @ptrCast only when other
// conversions are not possible.
const bytes align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12};
- const u32_ptr = @ptrCast(&const u32, &bytes[0]);
+ const u32_ptr = @ptrCast(*const u32, &bytes[0]);
assert(u32_ptr.* == 0x12121212);
// Even this example is contrived - there are better ways to do the above than
@@ -1518,7 +1518,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
- assert((&u32).Child == u32);
+ assert((*u32).Child == u32);
}
{#code_end#}
{#header_open|Alignment#}
@@ -1543,15 +1543,15 @@ const builtin = @import("builtin");
test "variable alignment" {
var x: i32 = 1234;
const align_of_i32 = @alignOf(@typeOf(x));
- assert(@typeOf(&x) == &i32);
- assert(&i32 == &align(align_of_i32) i32);
+ assert(@typeOf(&x) == *i32);
+ assert(*i32 == *align(align_of_i32) i32);
if (builtin.arch == builtin.Arch.x86_64) {
- assert((&i32).alignment == 4);
+ assert((*i32).alignment == 4);
}
}
{#code_end#}
- In the same way that a &i32 can be implicitly cast to a
- &const i32, a pointer with a larger alignment can be implicitly
+
In the same way that a *i32 can be implicitly cast to a
+ *const i32, a pointer with a larger alignment can be implicitly
cast to a pointer with a smaller alignment, but not vice versa.
@@ -1565,7 +1565,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
- assert(@typeOf(&foo) == &align(4) u8);
+ assert(@typeOf(&foo) == *align(4) u8);
const slice = (&foo)[0..1];
assert(@typeOf(slice) == []align(4) u8);
}
@@ -1610,7 +1610,7 @@ fn foo(bytes: []u8) u32 {
u8 can alias any memory.
As an example, this code produces undefined behavior:
- @ptrCast(&u32, f32(12.34)).*
+ @ptrCast(*u32, f32(12.34)).*
Instead, use {#link|@bitCast#}:
@bitCast(u32, f32(12.34))
As an added benefit, the @bitcast version works at compile-time.
@@ -1736,7 +1736,7 @@ const Vec3 = struct {
};
}
- pub fn dot(self: &const Vec3, other: &const Vec3) f32 {
+ pub fn dot(self: *const Vec3, other: *const Vec3) f32 {
return self.x * other.x + self.y * other.y + self.z * other.z;
}
};
@@ -1768,7 +1768,7 @@ test "struct namespaced variable" {
// struct field order is determined by the compiler for optimal performance.
// however, you can still calculate a struct base pointer given a field pointer:
-fn setYBasedOnX(x: &f32, y: f32) void {
+fn setYBasedOnX(x: *f32, y: f32) void {
const point = @fieldParentPtr(Point, "x", x);
point.y = y;
}
@@ -1786,13 +1786,13 @@ test "field parent pointer" {
fn LinkedList(comptime T: type) type {
return struct {
pub const Node = struct {
- prev: ?&Node,
- next: ?&Node,
+ prev: ?*Node,
+ next: ?*Node,
data: T,
};
- first: ?&Node,
- last: ?&Node,
+ first: ?*Node,
+ last: ?*Node,
len: usize,
};
}
@@ -2039,7 +2039,7 @@ const Variant = union(enum) {
Int: i32,
Bool: bool,
- fn truthy(self: &const Variant) bool {
+ fn truthy(self: *const Variant) bool {
return switch (self.*) {
Variant.Int => |x_int| x_int != 0,
Variant.Bool => |x_bool| x_bool,
@@ -2786,7 +2786,7 @@ test "pass aggregate type by value to function" {
}
{#code_end#}
- Instead, one must use &const. Zig allows implicitly casting something
+ Instead, one must use *const. Zig allows implicitly casting something
to a const pointer to it:
{#code_begin|test#}
@@ -2794,7 +2794,7 @@ const Foo = struct {
x: i32,
};
-fn bar(foo: &const Foo) void {}
+fn bar(foo: *const Foo) void {}
test "implicitly cast to const pointer" {
bar(Foo {.x = 12,});
@@ -3208,16 +3208,16 @@ struct Foo *do_a_thing(void) {
Zig code
{#code_begin|syntax#}
// malloc prototype included for reference
-extern fn malloc(size: size_t) ?&u8;
+extern fn malloc(size: size_t) ?*u8;
-fn doAThing() ?&Foo {
+fn doAThing() ?*Foo {
const ptr = malloc(1234) ?? return null;
// ...
}
{#code_end#}
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
- is &u8 not ?&u8. The ?? operator
+ is *u8 not ?*u8. The ?? operator
unwrapped the nullable type and therefore ptr is guaranteed to be non-null everywhere
it is used in the function.
@@ -3237,7 +3237,7 @@ fn doAThing() ?&Foo {
In Zig you can accomplish the same thing:
{#code_begin|syntax#}
-fn doAThing(nullable_foo: ?&Foo) void {
+fn doAThing(nullable_foo: ?*Foo) void {
// do some stuff
if (nullable_foo) |foo| {
@@ -3713,7 +3713,7 @@ fn List(comptime T: type) type {
{#code_begin|syntax#}
const Node = struct {
- next: &Node,
+ next: *Node,
name: []u8,
};
{#code_end#}
@@ -3745,7 +3745,7 @@ pub fn main() void {
{#code_begin|syntax#}
/// Calls print and then flushes the buffer.
-pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!void {
+pub fn printf(self: *OutStream, comptime format: []const u8, args: ...) error!void {
const State = enum {
Start,
OpenBrace,
@@ -3817,7 +3817,7 @@ pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!vo
and emits a function that actually looks like this:
{#code_begin|syntax#}
-pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void {
+pub fn printf(self: *OutStream, arg0: i32, arg1: []const u8) !void {
try self.write("here is a string: '");
try self.printValue(arg0);
try self.write("' here is a number: ");
@@ -3831,7 +3831,7 @@ pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void {
on the type:
{#code_begin|syntax#}
-pub fn printValue(self: &OutStream, value: var) !void {
+pub fn printValue(self: *OutStream, value: var) !void {
const T = @typeOf(value);
if (@isInteger(T)) {
return self.printInt(T, value);
@@ -3911,7 +3911,7 @@ pub fn main() void {
at compile time.
{#header_open|@addWithOverflow#}
- @addWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @addWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
Performs result.* = a + b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -3919,7 +3919,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@ArgType#}
- @ArgType(comptime T: type, comptime n: usize) -> type
+ @ArgType(comptime T: type, comptime n: usize) type
This builtin function takes a function type and returns the type of the parameter at index n.
@@ -3931,7 +3931,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@atomicLoad#}
- @atomicLoad(comptime T: type, ptr: &const T, comptime ordering: builtin.AtomicOrder) -> T
+ @atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T
This builtin function atomically dereferences a pointer and returns the value.
@@ -3950,7 +3950,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@atomicRmw#}
- @atomicRmw(comptime T: type, ptr: &T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -> T
+ @atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T
This builtin function atomically modifies memory and then returns the previous value.
@@ -3969,7 +3969,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@bitCast#}
- @bitCast(comptime DestType: type, value: var) -> DestType
+ @bitCast(comptime DestType: type, value: var) DestType
Converts a value of one type to another type.
@@ -4002,9 +4002,9 @@ pub fn main() void {
{#header_close#}
{#header_open|@alignCast#}
- @alignCast(comptime alignment: u29, ptr: var) -> var
+ @alignCast(comptime alignment: u29, ptr: var) var
- ptr can be &T, fn(), ?&T,
+ ptr can be *T, fn(), ?*T,
?fn(), or []T. It returns the same type as ptr
except with the alignment adjusted to the new value.
@@ -4013,7 +4013,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@alignOf#}
- @alignOf(comptime T: type) -> (number literal)
+ @alignOf(comptime T: type) (number literal)
This function returns the number of bytes that this type should be aligned to
for the current target to match the C ABI. When the child type of a pointer has
@@ -4021,7 +4021,7 @@ pub fn main() void {
const assert = @import("std").debug.assert;
comptime {
- assert(&u32 == &align(@alignOf(u32)) u32);
+ assert(*u32 == *align(@alignOf(u32)) u32);
}
The result is a target-specific compile time constant. It is guaranteed to be
@@ -4049,7 +4049,7 @@ comptime {
{#see_also|Import from C Header File|@cInclude|@cImport|@cUndef|void#}
{#header_close#}
{#header_open|@cImport#}
-
@cImport(expression) -> (namespace)
+ @cImport(expression) (namespace)
This function parses C code and imports the functions, types, variables, and
compatible macro definitions into the result namespace.
@@ -4095,13 +4095,13 @@ comptime {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
{#header_open|@canImplicitCast#}
-
@canImplicitCast(comptime T: type, value) -> bool
+ @canImplicitCast(comptime T: type, value) bool
Returns whether a value can be implicitly casted to a given type.
{#header_close#}
{#header_open|@clz#}
- @clz(x: T) -> U
+ @clz(x: T) U
This function counts the number of leading zeroes in x which is an integer
type T.
@@ -4116,13 +4116,13 @@ comptime {
{#header_close#}
{#header_open|@cmpxchgStrong#}
-
@cmpxchgStrong(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> ?T
+ @cmpxchgStrong(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
This function performs a strong atomic compare exchange operation. It's the equivalent of this code,
except atomic:
{#code_begin|syntax#}
-fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T {
+fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value) {
ptr.* = new_value;
@@ -4143,13 +4143,13 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_v
{#see_also|Compile Variables|cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
- @cmpxchgWeak(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> ?T
+ @cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
This function performs a weak atomic compare exchange operation. It's the equivalent of this code,
except atomic:
{#code_begin|syntax#}
-fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T {
+fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value and usuallyTrueButSometimesFalse()) {
ptr.* = new_value;
@@ -4237,7 +4237,7 @@ test "main" {
{#code_end#}
{#header_close#}
{#header_open|@ctz#}
- @ctz(x: T) -> U
+ @ctz(x: T) U
This function counts the number of trailing zeroes in x which is an integer
type T.
@@ -4251,7 +4251,7 @@ test "main" {
{#header_close#}
{#header_open|@divExact#}
- @divExact(numerator: T, denominator: T) -> T
+ @divExact(numerator: T, denominator: T) T
Exact division. Caller guarantees denominator != 0 and
@divTrunc(numerator, denominator) * denominator == numerator.
@@ -4264,7 +4264,7 @@ test "main" {
{#see_also|@divTrunc|@divFloor#}
{#header_close#}
{#header_open|@divFloor#}
-
@divFloor(numerator: T, denominator: T) -> T
+ @divFloor(numerator: T, denominator: T) T
Floored division. Rounds toward negative infinity. For unsigned integers it is
the same as numerator / denominator. Caller guarantees denominator != 0 and
@@ -4278,7 +4278,7 @@ test "main" {
{#see_also|@divTrunc|@divExact#}
{#header_close#}
{#header_open|@divTrunc#}
-
@divTrunc(numerator: T, denominator: T) -> T
+ @divTrunc(numerator: T, denominator: T) T
Truncated division. Rounds toward zero. For unsigned integers it is
the same as numerator / denominator. Caller guarantees denominator != 0 and
@@ -4292,7 +4292,7 @@ test "main" {
{#see_also|@divFloor|@divExact#}
{#header_close#}
{#header_open|@embedFile#}
-
@embedFile(comptime path: []const u8) -> [X]u8
+ @embedFile(comptime path: []const u8) [X]u8
This function returns a compile time constant fixed-size array with length
equal to the byte count of the file given by path. The contents of the array
@@ -4304,19 +4304,19 @@ test "main" {
{#see_also|@import#}
{#header_close#}
{#header_open|@export#}
-
@export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) -> []const u8
+ @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8
Creates a symbol in the output object file.
{#header_close#}
{#header_open|@tagName#}
- @tagName(value: var) -> []const u8
+ @tagName(value: var) []const u8
Converts an enum value or union value to a slice of bytes representing the name.
{#header_close#}
{#header_open|@TagType#}
- @TagType(T: type) -> type
+ @TagType(T: type) type
For an enum, returns the integer type that is used to store the enumeration value.
@@ -4325,7 +4325,7 @@ test "main" {
{#header_close#}
{#header_open|@errorName#}
- @errorName(err: error) -> []u8
+ @errorName(err: error) []u8
This function returns the string representation of an error. If an error
declaration is:
@@ -4341,7 +4341,7 @@ test "main" {
{#header_close#}
{#header_open|@errorReturnTrace#}
- @errorReturnTrace() -> ?&builtin.StackTrace
+ @errorReturnTrace() ?*builtin.StackTrace
If the binary is built with error return tracing, and this function is invoked in a
function that calls a function with an error or error union return type, returns a
@@ -4360,7 +4360,7 @@ test "main" {
{#header_close#}
{#header_open|@fieldParentPtr#}
@fieldParentPtr(comptime ParentType: type, comptime field_name: []const u8,
- field_ptr: &T) -> &ParentType
+ field_ptr: *T) *ParentType
Given a pointer to a field, returns the base pointer of a struct.
@@ -4380,7 +4380,7 @@ test "main" {
{#header_close#}
{#header_open|@import#}
- @import(comptime path: []u8) -> (namespace)
+ @import(comptime path: []u8) (namespace)
This function finds a zig file corresponding to path and imports all the
public top level declarations into the resulting namespace.
@@ -4400,7 +4400,7 @@ test "main" {
{#see_also|Compile Variables|@embedFile#}
{#header_close#}
{#header_open|@inlineCall#}
-
@inlineCall(function: X, args: ...) -> Y
+ @inlineCall(function: X, args: ...) Y
This calls a function, in the same way that invoking an expression with parentheses does:
@@ -4420,19 +4420,19 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#see_also|@noInlineCall#}
{#header_close#}
{#header_open|@intToPtr#}
- @intToPtr(comptime DestType: type, int: usize) -> DestType
+ @intToPtr(comptime DestType: type, int: usize) DestType
Converts an integer to a pointer. To convert the other way, use {#link|@ptrToInt#}.
{#header_close#}
{#header_open|@IntType#}
- @IntType(comptime is_signed: bool, comptime bit_count: u8) -> type
+ @IntType(comptime is_signed: bool, comptime bit_count: u8) type
This function returns an integer type with the given signness and bit count.
{#header_close#}
{#header_open|@maxValue#}
- @maxValue(comptime T: type) -> (number literal)
+ @maxValue(comptime T: type) (number literal)
This function returns the maximum value of the integer type T.
@@ -4441,7 +4441,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@memberCount#}
- @memberCount(comptime T: type) -> (number literal)
+ @memberCount(comptime T: type) (number literal)
This function returns the number of members in a struct, enum, or union type.
@@ -4453,7 +4453,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@memberName#}
- @memberName(comptime T: type, comptime index: usize) -> [N]u8
+ @memberName(comptime T: type, comptime index: usize) [N]u8
Returns the field name of a struct, union, or enum.
The result is a compile time constant.
@@ -4463,15 +4463,15 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@field#}
- @field(lhs: var, comptime field_name: []const u8) -> (field)
+ @field(lhs: var, comptime field_name: []const u8) (field)
Preforms field access equivalent to lhs.->field_name-<.
{#header_close#}
{#header_open|@memberType#}
- @memberType(comptime T: type, comptime index: usize) -> type
+ @memberType(comptime T: type, comptime index: usize) type
Returns the field type of a struct or union.
{#header_close#}
{#header_open|@memcpy#}
- @memcpy(noalias dest: &u8, noalias source: &const u8, byte_count: usize)
+ @memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)
This function copies bytes from one region of memory to another. dest and
source are both pointers and must not overlap.
@@ -4489,7 +4489,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
mem.copy(u8, dest[0...byte_count], source[0...byte_count]);
{#header_close#}
{#header_open|@memset#}
-
@memset(dest: &u8, c: u8, byte_count: usize)
+ @memset(dest: *u8, c: u8, byte_count: usize)
This function sets a region of memory to c. dest is a pointer.
@@ -4506,7 +4506,7 @@ mem.copy(u8, dest[0...byte_count], source[0...byte_count]);
mem.set(u8, dest, c);
{#header_close#}
{#header_open|@minValue#}
- @minValue(comptime T: type) -> (number literal)
+ @minValue(comptime T: type) (number literal)
This function returns the minimum value of the integer type T.
@@ -4515,7 +4515,7 @@ mem.set(u8, dest, c);
{#header_close#}
{#header_open|@mod#}
- @mod(numerator: T, denominator: T) -> T
+ @mod(numerator: T, denominator: T) T
Modulus division. For unsigned integers this is the same as
numerator % denominator. Caller guarantees denominator > 0.
@@ -4528,7 +4528,7 @@ mem.set(u8, dest, c);
{#see_also|@rem#}
{#header_close#}
{#header_open|@mulWithOverflow#}
-
@mulWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @mulWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
Performs result.* = a * b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -4536,7 +4536,7 @@ mem.set(u8, dest, c);
{#header_close#}
{#header_open|@newStackCall#}
- @newStackCall(new_stack: []u8, function: var, args: ...) -> var
+ @newStackCall(new_stack: []u8, function: var, args: ...) var
This calls a function, in the same way that invoking an expression with parentheses does. However,
instead of using the same stack as the caller, the function uses the stack provided in the new_stack
@@ -4572,7 +4572,7 @@ fn targetFunction(x: i32) usize {
{#code_end#}
{#header_close#}
{#header_open|@noInlineCall#}
-
@noInlineCall(function: var, args: ...) -> var
+ @noInlineCall(function: var, args: ...) var
This calls a function, in the same way that invoking an expression with parentheses does:
@@ -4594,13 +4594,13 @@ fn add(a: i32, b: i32) i32 {
{#see_also|@inlineCall#}
{#header_close#}
{#header_open|@offsetOf#}
- @offsetOf(comptime T: type, comptime field_name: [] const u8) -> (number literal)
+ @offsetOf(comptime T: type, comptime field_name: [] const u8) (number literal)
This function returns the byte offset of a field relative to its containing struct.
{#header_close#}
{#header_open|@OpaqueType#}
- @OpaqueType() -> type
+ @OpaqueType() type
Creates a new type with an unknown size and alignment.
@@ -4608,12 +4608,12 @@ fn add(a: i32, b: i32) i32 {
This is typically used for type safety when interacting with C code that does not expose struct details.
Example:
- {#code_begin|test_err|expected type '&Derp', found '&Wat'#}
+ {#code_begin|test_err|expected type '*Derp', found '*Wat'#}
const Derp = @OpaqueType();
const Wat = @OpaqueType();
-extern fn bar(d: &Derp) void;
-export fn foo(w: &Wat) void {
+extern fn bar(d: *Derp) void;
+export fn foo(w: *Wat) void {
bar(w);
}
@@ -4623,7 +4623,7 @@ test "call foo" {
{#code_end#}
{#header_close#}
{#header_open|@panic#}
- @panic(message: []const u8) -> noreturn
+ @panic(message: []const u8) noreturn
Invokes the panic handler function. By default the panic handler function
calls the public panic function exposed in the root source file, or
@@ -4639,19 +4639,19 @@ test "call foo" {
{#see_also|Root Source File#}
{#header_close#}
{#header_open|@ptrCast#}
-
@ptrCast(comptime DestType: type, value: var) -> DestType
+ @ptrCast(comptime DestType: type, value: var) DestType
Converts a pointer of one type to a pointer of another type.
{#header_close#}
{#header_open|@ptrToInt#}
- @ptrToInt(value: var) -> usize
+ @ptrToInt(value: var) usize
Converts value to a usize which is the address of the pointer. value can be one of these types:
- &T
- ?&T
+ *T
+ ?*T
fn()
?fn()
@@ -4659,7 +4659,7 @@ test "call foo" {
{#header_close#}
{#header_open|@rem#}
- @rem(numerator: T, denominator: T) -> T
+ @rem(numerator: T, denominator: T) T
Remainder division. For unsigned integers this is the same as
numerator % denominator. Caller guarantees denominator > 0.
@@ -4776,13 +4776,13 @@ pub const FloatMode = enum {
{#see_also|Compile Variables#}
{#header_close#}
{#header_open|@setGlobalSection#}
-
@setGlobalSection(global_variable_name, comptime section_name: []const u8) -> bool
+ @setGlobalSection(global_variable_name, comptime section_name: []const u8) bool
Puts the global variable in the specified section.
{#header_close#}
{#header_open|@shlExact#}
- @shlExact(value: T, shift_amt: Log2T) -> T
+ @shlExact(value: T, shift_amt: Log2T) T
Performs the left shift operation (<<). Caller guarantees
that the shift will not shift any 1 bits out.
@@ -4794,7 +4794,7 @@ pub const FloatMode = enum {
{#see_also|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shlWithOverflow#}
-
@shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: &T) -> bool
+ @shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: *T) bool
Performs result.* = a << b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -4807,7 +4807,7 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shrExact#}
{#header_close#}
{#header_open|@shrExact#}
-
@shrExact(value: T, shift_amt: Log2T) -> T
+ @shrExact(value: T, shift_amt: Log2T) T
Performs the right shift operation (>>). Caller guarantees
that the shift will not shift any 1 bits out.
@@ -4819,7 +4819,7 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@sizeOf#}
-
@sizeOf(comptime T: type) -> (number literal)
+ @sizeOf(comptime T: type) (number literal)
This function returns the number of bytes it takes to store T in memory.
@@ -4828,7 +4828,7 @@ pub const FloatMode = enum {
{#header_close#}
{#header_open|@sqrt#}
- @sqrt(comptime T: type, value: T) -> T
+ @sqrt(comptime T: type, value: T) T
Performs the square root of a floating point number. Uses a dedicated hardware instruction
when available. Currently only supports f32 and f64 at runtime. f128 at runtime is TODO.
@@ -4838,7 +4838,7 @@ pub const FloatMode = enum {
{#header_close#}
{#header_open|@subWithOverflow#}
- @subWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @subWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
Performs result.* = a - b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -4846,7 +4846,7 @@ pub const FloatMode = enum {
{#header_close#}
{#header_open|@truncate#}
- @truncate(comptime T: type, integer) -> T
+ @truncate(comptime T: type, integer) T
This function truncates bits from an integer type, resulting in a smaller
integer type.
@@ -4870,7 +4870,7 @@ const b: u8 = @truncate(u8, a);
{#header_close#}
{#header_open|@typeId#}
-
@typeId(comptime T: type) -> @import("builtin").TypeId
+ @typeId(comptime T: type) @import("builtin").TypeId
Returns which kind of type something is. Possible values:
@@ -4904,7 +4904,7 @@ pub const TypeId = enum {
{#code_end#}
{#header_close#}
{#header_open|@typeInfo#}
- @typeInfo(comptime T: type) -> @import("builtin").TypeInfo
+ @typeInfo(comptime T: type) @import("builtin").TypeInfo
Returns information on the type. Returns a value of the following union:
@@ -5080,14 +5080,14 @@ pub const TypeInfo = union(TypeId) {
{#code_end#}
{#header_close#}
{#header_open|@typeName#}
- @typeName(T: type) -> []u8
+ @typeName(T: type) []u8
This function returns the string representation of a type.
{#header_close#}
{#header_open|@typeOf#}
- @typeOf(expression) -> type
+ @typeOf(expression) type
This function returns a compile-time constant, which is the type of the
expression passed as an argument. The expression is evaluated.
@@ -5937,7 +5937,7 @@ pub const __zig_test_fn_slice = {}; // overwritten later
{#header_open|C String Literals#}
{#code_begin|exe#}
{#link_libc#}
-extern fn puts(&const u8) void;
+extern fn puts(*const u8) void;
pub fn main() void {
puts(c"this has a null terminator");
@@ -5996,8 +5996,8 @@ const c = @cImport({
{#code_begin|syntax#}
const base64 = @import("std").base64;
-export fn decode_base_64(dest_ptr: &u8, dest_len: usize,
- source_ptr: &const u8, source_len: usize) usize
+export fn decode_base_64(dest_ptr: *u8, dest_len: usize,
+ source_ptr: *const u8, source_len: usize) usize
{
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
@@ -6028,7 +6028,7 @@ int main(int argc, char **argv) {
{#code_begin|syntax#}
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");
diff --git a/example/cat/main.zig b/example/cat/main.zig
index de0d323bed..1b34cb22eb 100644
--- a/example/cat/main.zig
+++ b/example/cat/main.zig
@@ -41,7 +41,7 @@ fn usage(exe: []const u8) !void {
return error.Invalid;
}
-fn cat_file(stdout: &os.File, file: &os.File) !void {
+fn cat_file(stdout: *os.File, file: *os.File) !void {
var buf: [1024 * 4]u8 = undefined;
while (true) {
diff --git a/example/hello_world/hello_libc.zig b/example/hello_world/hello_libc.zig
index 1df8f04ce4..f64beda40f 100644
--- a/example/hello_world/hello_libc.zig
+++ b/example/hello_world/hello_libc.zig
@@ -7,7 +7,7 @@ const c = @cImport({
const msg = c"Hello, world!\n";
-export fn main(argc: c_int, argv: &&u8) c_int {
+export fn main(argc: c_int, argv: **u8) c_int {
if (c.printf(msg) != c_int(c.strlen(msg))) return -1;
return 0;
diff --git a/example/mix_o_files/base64.zig b/example/mix_o_files/base64.zig
index e682a97055..35b090825b 100644
--- a/example/mix_o_files/base64.zig
+++ b/example/mix_o_files/base64.zig
@@ -1,6 +1,6 @@
const base64 = @import("std").base64;
-export fn decode_base_64(dest_ptr: &u8, dest_len: usize, source_ptr: &const u8, source_len: usize) usize {
+export fn decode_base_64(dest_ptr: *u8, dest_len: usize, source_ptr: *const u8, source_len: usize) usize {
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
const base64_decoder = base64.standard_decoder_unsafe;
diff --git a/example/mix_o_files/build.zig b/example/mix_o_files/build.zig
index e5d2e6a446..a4e7fbbf8f 100644
--- a/example/mix_o_files/build.zig
+++ b/example/mix_o_files/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");
diff --git a/example/shared_library/build.zig b/example/shared_library/build.zig
index 30c714c6c6..05648cf9eb 100644
--- a/example/shared_library/build.zig
+++ b/example/shared_library/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
const exe = b.addCExecutable("test");
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index fa2166e3a5..df2c04ef1f 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -30,7 +30,7 @@ fn argInAllowedSet(maybe_set: ?[]const []const u8, arg: []const u8) bool {
}
// Modifies the current argument index during iteration
-fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: &usize) !FlagArg {
+fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
switch (required) {
0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value?
1 => {
@@ -79,7 +79,7 @@ pub const Args = struct {
flags: HashMapFlags,
positionals: ArrayList([]const u8),
- pub fn parse(allocator: &Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
+ pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
var parsed = Args{
.flags = HashMapFlags.init(allocator),
.positionals = ArrayList([]const u8).init(allocator),
@@ -143,18 +143,18 @@ pub const Args = struct {
return parsed;
}
- pub fn deinit(self: &Args) void {
+ pub fn deinit(self: *Args) void {
self.flags.deinit();
self.positionals.deinit();
}
// e.g. --help
- pub fn present(self: &Args, name: []const u8) bool {
+ pub fn present(self: *Args, name: []const u8) bool {
return self.flags.contains(name);
}
// e.g. --name value
- pub fn single(self: &Args, name: []const u8) ?[]const u8 {
+ pub fn single(self: *Args, name: []const u8) ?[]const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Single => |inner| {
@@ -168,7 +168,7 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
- pub fn many(self: &Args, name: []const u8) ?[]const []const u8 {
+ pub fn many(self: *Args, name: []const u8) ?[]const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Many => |inner| {
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index 9905b8e3a6..32d2450aac 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -16,18 +16,18 @@ pub const Msg = struct {
text: []u8,
first_token: TokenIndex,
last_token: TokenIndex,
- tree: &ast.Tree,
+ tree: *ast.Tree,
};
/// `path` must outlive the returned Msg
/// `tree` must outlive the returned Msg
/// Caller owns returned Msg and must free with `allocator`
pub fn createFromParseError(
- allocator: &mem.Allocator,
- parse_error: &const ast.Error,
- tree: &ast.Tree,
+ allocator: *mem.Allocator,
+ parse_error: *const ast.Error,
+ tree: *ast.Tree,
path: []const u8,
-) !&Msg {
+) !*Msg {
const loc_token = parse_error.loc();
var text_buf = try std.Buffer.initSize(allocator, 0);
defer text_buf.deinit();
@@ -47,7 +47,7 @@ pub fn createFromParseError(
return msg;
}
-pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void {
+pub fn printToStream(stream: var, msg: *const Msg, color_on: bool) !void {
const first_token = msg.tree.tokens.at(msg.first_token);
const last_token = msg.tree.tokens.at(msg.last_token);
const start_loc = msg.tree.tokenLocationPtr(0, first_token);
@@ -76,7 +76,7 @@ pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void {
try stream.write("\n");
}
-pub fn printToFile(file: &os.File, msg: &const Msg, color: Color) !void {
+pub fn printToFile(file: *os.File, msg: *const Msg, color: Color) !void {
const color_on = switch (color) {
Color.Auto => file.isTty(),
Color.On => true,
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index adab00286b..56b56c0c78 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -7,7 +7,7 @@ const os = std.os;
const warn = std.debug.warn;
/// Caller must free result
-pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![]u8 {
+pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
const test_zig_dir = try os.path.join(allocator, test_path, "lib", "zig");
errdefer allocator.free(test_zig_dir);
@@ -21,7 +21,7 @@ pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![
}
/// Caller must free result
-pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPath(allocator);
defer allocator.free(self_exe_path);
@@ -42,7 +42,7 @@ pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
return error.FileNotFound;
}
-pub fn resolveZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
warn(
\\Unable to find zig lib directory: {}.
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index c4550b5179..3334d9511b 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -2,7 +2,7 @@ const Scope = @import("scope.zig").Scope;
pub const Instruction = struct {
id: Id,
- scope: &Scope,
+ scope: *Scope,
pub const Id = enum {
Br,
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 71838503b7..80b1c3889a 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -18,8 +18,8 @@ const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
var stderr_file: os.File = undefined;
-var stderr: &io.OutStream(io.FileOutStream.Error) = undefined;
-var stdout: &io.OutStream(io.FileOutStream.Error) = undefined;
+var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
+var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const usage =
\\usage: zig [command] [options]
@@ -43,7 +43,7 @@ const usage =
const Command = struct {
name: []const u8,
- exec: fn (&Allocator, []const []const u8) error!void,
+ exec: fn (*Allocator, []const []const u8) error!void,
};
pub fn main() !void {
@@ -191,7 +191,7 @@ const missing_build_file =
\\
;
-fn cmdBuild(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
@@ -426,7 +426,7 @@ const args_build_generic = []Flag{
Flag.Arg1("--ver-patch"),
};
-fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Module.Kind) !void {
+fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Module.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
@@ -661,19 +661,19 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
}
-fn cmdBuildExe(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Exe);
}
// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
-fn cmdBuildLib(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Lib);
}
// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
-fn cmdBuildObj(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Obj);
}
@@ -700,7 +700,7 @@ const args_fmt_spec = []Flag{
}),
};
-fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_fmt_spec, args);
defer flags.deinit();
@@ -768,7 +768,7 @@ fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
-fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write("Architectures:\n");
{
comptime var i: usize = 0;
@@ -810,7 +810,7 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
// cmd:version /////////////////////////////////////////////////////////////////////////////////////
-fn cmdVersion(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
@@ -827,7 +827,7 @@ const usage_test =
const args_test_spec = []Flag{Flag.Bool("--help")};
-fn cmdTest(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTest(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
@@ -862,7 +862,7 @@ const usage_run =
const args_run_spec = []Flag{Flag.Bool("--help")};
-fn cmdRun(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdRun(allocator: *Allocator, args: []const []const u8) !void {
var compile_args = args;
var runtime_args: []const []const u8 = []const []const u8{};
@@ -912,7 +912,7 @@ const args_translate_c_spec = []Flag{
Flag.Arg1("--output"),
};
-fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_translate_c_spec, args);
defer flags.deinit();
@@ -958,7 +958,7 @@ fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
-fn cmdHelp(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
try stderr.write(usage);
}
@@ -981,7 +981,7 @@ const info_zen =
\\
;
-fn cmdZen(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
@@ -996,7 +996,7 @@ const usage_internal =
\\
;
-fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try stderr.write(usage_internal);
os.exit(1);
@@ -1018,7 +1018,7 @@ fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
try stderr.write(usage_internal);
}
-fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print(
\\ZIG_CMAKE_BINARY_DIR {}
\\ZIG_CXX_COMPILER {}
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 61834eab66..a7ddf3f9e9 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -13,7 +13,7 @@ const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
pub const Module = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
@@ -53,8 +53,8 @@ pub const Module = struct {
windows_subsystem_windows: bool,
windows_subsystem_console: bool,
- link_libs_list: ArrayList(&LinkLib),
- libc_link_lib: ?&LinkLib,
+ link_libs_list: ArrayList(*LinkLib),
+ libc_link_lib: ?*LinkLib,
err_color: errmsg.Color,
@@ -106,19 +106,19 @@ pub const Module = struct {
pub const CliPkg = struct {
name: []const u8,
path: []const u8,
- children: ArrayList(&CliPkg),
- parent: ?&CliPkg,
+ children: ArrayList(*CliPkg),
+ parent: ?*CliPkg,
- pub fn init(allocator: &mem.Allocator, name: []const u8, path: []const u8, parent: ?&CliPkg) !&CliPkg {
+ pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
var pkg = try allocator.create(CliPkg);
pkg.name = name;
pkg.path = path;
- pkg.children = ArrayList(&CliPkg).init(allocator);
+ pkg.children = ArrayList(*CliPkg).init(allocator);
pkg.parent = parent;
return pkg;
}
- pub fn deinit(self: &CliPkg) void {
+ pub fn deinit(self: *CliPkg) void {
for (self.children.toSliceConst()) |child| {
child.deinit();
}
@@ -126,7 +126,7 @@ pub const Module = struct {
}
};
- pub fn create(allocator: &mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: &const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !&Module {
+ pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
@@ -188,7 +188,7 @@ pub const Module = struct {
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
- .link_libs_list = ArrayList(&LinkLib).init(allocator),
+ .link_libs_list = ArrayList(*LinkLib).init(allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
@@ -200,11 +200,11 @@ pub const Module = struct {
return module_ptr;
}
- fn dump(self: &Module) void {
+ fn dump(self: *Module) void {
c.LLVMDumpModule(self.module);
}
- pub fn destroy(self: &Module) void {
+ pub fn destroy(self: *Module) void {
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
@@ -213,7 +213,7 @@ pub const Module = struct {
self.allocator.destroy(self);
}
- pub fn build(self: &Module) !void {
+ pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
@@ -259,12 +259,12 @@ pub const Module = struct {
self.dump();
}
- pub fn link(self: &Module, out_file: ?[]const u8) !void {
+ pub fn link(self: *Module, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
}
- pub fn addLinkLib(self: &Module, name: []const u8, provided_explicitly: bool) !&LinkLib {
+ pub fn addLinkLib(self: *Module, name: []const u8, provided_explicitly: bool) !*LinkLib {
const is_libc = mem.eql(u8, name, "c");
if (is_libc) {
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index 05e586daae..b73dcb4ed3 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -1,6 +1,6 @@
pub const Scope = struct {
id: Id,
- parent: &Scope,
+ parent: *Scope,
pub const Id = enum {
Decls,
diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig
index 7983a3ddec..724d99ea23 100644
--- a/src-self-hosted/target.zig
+++ b/src-self-hosted/target.zig
@@ -11,7 +11,7 @@ pub const Target = union(enum) {
Native,
Cross: CrossTarget,
- pub fn oFileExt(self: &const Target) []const u8 {
+ pub fn oFileExt(self: *const Target) []const u8 {
const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
@@ -22,28 +22,28 @@ pub const Target = union(enum) {
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
+ pub fn getOs(self: *const Target) builtin.Os {
return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 9c156fb58b..b9199c2757 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -374,7 +374,7 @@ enum NodeType {
NodeTypeCharLiteral,
NodeTypeSymbol,
NodeTypePrefixOpExpr,
- NodeTypeAddrOfExpr,
+ NodeTypePointerType,
NodeTypeFnCallExpr,
NodeTypeArrayAccessExpr,
NodeTypeSliceExpr,
@@ -616,6 +616,7 @@ enum PrefixOp {
PrefixOpNegationWrap,
PrefixOpMaybe,
PrefixOpUnwrapMaybe,
+ PrefixOpAddrOf,
};
struct AstNodePrefixOpExpr {
@@ -623,7 +624,7 @@ struct AstNodePrefixOpExpr {
AstNode *primary_expr;
};
-struct AstNodeAddrOfExpr {
+struct AstNodePointerType {
AstNode *align_expr;
BigInt *bit_offset_start;
BigInt *bit_offset_end;
@@ -899,7 +900,7 @@ struct AstNode {
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
AstNodePrefixOpExpr prefix_op_expr;
- AstNodeAddrOfExpr addr_of_expr;
+ AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
AstNodeArrayAccessExpr array_access_expr;
AstNodeSliceExpr slice_expr;
@@ -2053,7 +2054,7 @@ enum IrInstructionId {
IrInstructionIdTypeInfo,
IrInstructionIdTypeId,
IrInstructionIdSetEvalBranchQuota,
- IrInstructionIdPtrTypeOf,
+ IrInstructionIdPtrType,
IrInstructionIdAlignCast,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
@@ -2412,6 +2413,17 @@ struct IrInstructionArrayType {
IrInstruction *child_type;
};
+struct IrInstructionPtrType {
+ IrInstruction base;
+
+ IrInstruction *align_value;
+ IrInstruction *child_type;
+ uint32_t bit_offset_start;
+ uint32_t bit_offset_end;
+ bool is_const;
+ bool is_volatile;
+};
+
struct IrInstructionPromiseType {
IrInstruction base;
@@ -2891,17 +2903,6 @@ struct IrInstructionSetEvalBranchQuota {
IrInstruction *new_quota;
};
-struct IrInstructionPtrTypeOf {
- IrInstruction base;
-
- IrInstruction *align_value;
- IrInstruction *child_type;
- uint32_t bit_offset_start;
- uint32_t bit_offset_end;
- bool is_const;
- bool is_volatile;
-};
-
struct IrInstructionAlignCast {
IrInstruction base;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index b00e18a9a1..a5011035c5 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -418,12 +418,12 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
const char *volatile_str = is_volatile ? "volatile " : "";
buf_resize(&entry->name, 0);
if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) {
- buf_appendf(&entry->name, "&%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
+ buf_appendf(&entry->name, "*%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
} else if (unaligned_bit_count == 0) {
- buf_appendf(&entry->name, "&align(%" PRIu32 ") %s%s%s", byte_alignment,
+ buf_appendf(&entry->name, "*align(%" PRIu32 ") %s%s%s", byte_alignment,
const_str, volatile_str, buf_ptr(&child_type->name));
} else {
- buf_appendf(&entry->name, "&align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
+ buf_appendf(&entry->name, "*align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name));
}
@@ -3270,7 +3270,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeThisLiteral:
case NodeTypeSymbol:
case NodeTypePrefixOpExpr:
- case NodeTypeAddrOfExpr:
+ case NodeTypePointerType:
case NodeTypeIfBoolExpr:
case NodeTypeWhileExpr:
case NodeTypeForExpr:
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 5a1e81b36d..f356f406b0 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -68,6 +68,7 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpBinNot: return "~";
case PrefixOpMaybe: return "?";
case PrefixOpUnwrapMaybe: return "??";
+ case PrefixOpAddrOf: return "&";
}
zig_unreachable();
}
@@ -185,8 +186,6 @@ static const char *node_type_str(NodeType node_type) {
return "Symbol";
case NodeTypePrefixOpExpr:
return "PrefixOpExpr";
- case NodeTypeAddrOfExpr:
- return "AddrOfExpr";
case NodeTypeUse:
return "Use";
case NodeTypeBoolLiteral:
@@ -251,6 +250,8 @@ static const char *node_type_str(NodeType node_type) {
return "Suspend";
case NodeTypePromiseType:
return "PromiseType";
+ case NodeTypePointerType:
+ return "PointerType";
}
zig_unreachable();
}
@@ -616,41 +617,41 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "%s", prefix_op_str(op));
AstNode *child_node = node->data.prefix_op_expr.primary_expr;
- bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypeAddrOfExpr;
+ bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypePointerType;
render_node_extra(ar, child_node, new_grouped);
if (!grouped) fprintf(ar->f, ")");
break;
}
- case NodeTypeAddrOfExpr:
+ case NodeTypePointerType:
{
if (!grouped) fprintf(ar->f, "(");
- fprintf(ar->f, "&");
- if (node->data.addr_of_expr.align_expr != nullptr) {
+ fprintf(ar->f, "*");
+ if (node->data.pointer_type.align_expr != nullptr) {
fprintf(ar->f, "align(");
- render_node_grouped(ar, node->data.addr_of_expr.align_expr);
- if (node->data.addr_of_expr.bit_offset_start != nullptr) {
- assert(node->data.addr_of_expr.bit_offset_end != nullptr);
+ render_node_grouped(ar, node->data.pointer_type.align_expr);
+ if (node->data.pointer_type.bit_offset_start != nullptr) {
+ assert(node->data.pointer_type.bit_offset_end != nullptr);
Buf offset_start_buf = BUF_INIT;
buf_resize(&offset_start_buf, 0);
- bigint_append_buf(&offset_start_buf, node->data.addr_of_expr.bit_offset_start, 10);
+ bigint_append_buf(&offset_start_buf, node->data.pointer_type.bit_offset_start, 10);
Buf offset_end_buf = BUF_INIT;
buf_resize(&offset_end_buf, 0);
- bigint_append_buf(&offset_end_buf, node->data.addr_of_expr.bit_offset_end, 10);
+ bigint_append_buf(&offset_end_buf, node->data.pointer_type.bit_offset_end, 10);
fprintf(ar->f, ":%s:%s ", buf_ptr(&offset_start_buf), buf_ptr(&offset_end_buf));
}
fprintf(ar->f, ") ");
}
- if (node->data.addr_of_expr.is_const) {
+ if (node->data.pointer_type.is_const) {
fprintf(ar->f, "const ");
}
- if (node->data.addr_of_expr.is_volatile) {
+ if (node->data.pointer_type.is_volatile) {
fprintf(ar->f, "volatile ");
}
- render_node_ungrouped(ar, node->data.addr_of_expr.op_expr);
+ render_node_ungrouped(ar, node->data.pointer_type.op_expr);
if (!grouped) fprintf(ar->f, ")");
break;
}
@@ -669,7 +670,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, " ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
- bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypeAddrOfExpr);
+ bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
render_node_extra(ar, fn_ref_node, grouped);
fprintf(ar->f, "(");
for (size_t i = 0; i < node->data.fn_call_expr.params.length; i += 1) {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 69542b3e67..d07d427729 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4600,7 +4600,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTypeInfo:
case IrInstructionIdTypeId:
case IrInstructionIdSetEvalBranchQuota:
- case IrInstructionIdPtrTypeOf:
+ case IrInstructionIdPtrType:
case IrInstructionIdOpaqueType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdArgType:
diff --git a/src/ir.cpp b/src/ir.cpp
index 6e944a8976..b1fac9f485 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -41,10 +41,6 @@ struct IrAnalyze {
static const LVal LVAL_NONE = { false, false, false };
static const LVal LVAL_PTR = { true, false, false };
-static LVal make_lval_addr(bool is_const, bool is_volatile) {
- return { true, is_const, is_volatile };
-}
-
enum ConstCastResultId {
ConstCastResultIdOk,
ConstCastResultIdErrSet,
@@ -629,8 +625,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetEvalBranchQuo
return IrInstructionIdSetEvalBranchQuota;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeOf *) {
- return IrInstructionIdPtrTypeOf;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrType *) {
+ return IrInstructionIdPtrType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) {
@@ -1196,11 +1192,11 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru
return new_instruction;
}
-static IrInstruction *ir_build_ptr_type_of(IrBuilder *irb, Scope *scope, AstNode *source_node,
+static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value,
uint32_t bit_offset_start, uint32_t bit_offset_end)
{
- IrInstructionPtrTypeOf *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node);
ptr_type_of_instruction->align_value = align_value;
ptr_type_of_instruction->child_type = child_type;
ptr_type_of_instruction->is_const = is_const;
@@ -4609,14 +4605,8 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode
}
static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) {
- AstNode *expr_node;
- if (node->type == NodeTypePrefixOpExpr) {
- expr_node = node->data.prefix_op_expr.primary_expr;
- } else if (node->type == NodeTypePtrDeref) {
- expr_node = node->data.ptr_deref_expr.target;
- } else {
- zig_unreachable();
- }
+ assert(node->type == NodeTypePrefixOpExpr);
+ AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
@@ -4640,16 +4630,12 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *
return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile);
}
-static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeAddrOfExpr);
- bool is_const = node->data.addr_of_expr.is_const;
- bool is_volatile = node->data.addr_of_expr.is_volatile;
- AstNode *expr_node = node->data.addr_of_expr.op_expr;
- AstNode *align_expr = node->data.addr_of_expr.align_expr;
-
- if (align_expr == nullptr && !is_const && !is_volatile) {
- return ir_gen_node_extra(irb, expr_node, scope, make_lval_addr(is_const, is_volatile));
- }
+static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypePointerType);
+ bool is_const = node->data.pointer_type.is_const;
+ bool is_volatile = node->data.pointer_type.is_volatile;
+ AstNode *expr_node = node->data.pointer_type.op_expr;
+ AstNode *align_expr = node->data.pointer_type.align_expr;
IrInstruction *align_value;
if (align_expr != nullptr) {
@@ -4665,27 +4651,27 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return child_type;
uint32_t bit_offset_start = 0;
- if (node->data.addr_of_expr.bit_offset_start != nullptr) {
- if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_start, 32, false)) {
+ if (node->data.pointer_type.bit_offset_start != nullptr) {
+ if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
Buf *val_buf = buf_alloc();
- bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_start, 10);
+ bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
- bit_offset_start = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_start);
+ bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start);
}
uint32_t bit_offset_end = 0;
- if (node->data.addr_of_expr.bit_offset_end != nullptr) {
- if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_end, 32, false)) {
+ if (node->data.pointer_type.bit_offset_end != nullptr) {
+ if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) {
Buf *val_buf = buf_alloc();
- bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_end, 10);
+ bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
- bit_offset_end = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_end);
+ bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end);
}
if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) {
@@ -4694,7 +4680,7 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return irb->codegen->invalid_instruction;
}
- return ir_build_ptr_type_of(irb, scope, node, child_type, is_const, is_volatile,
+ return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile,
align_value, bit_offset_start, bit_offset_end);
}
@@ -4761,6 +4747,10 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval);
case PrefixOpUnwrapMaybe:
return ir_gen_maybe_assert_ok(irb, scope, node, lval);
+ case PrefixOpAddrOf: {
+ AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
+ return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
+ }
}
zig_unreachable();
}
@@ -6568,8 +6558,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval);
case NodeTypePrefixOpExpr:
return ir_gen_prefix_op_expr(irb, scope, node, lval);
- case NodeTypeAddrOfExpr:
- return ir_lval_wrap(irb, scope, ir_gen_address_of(irb, scope, node), lval);
case NodeTypeContainerInitExpr:
return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval);
case NodeTypeVariableDeclaration:
@@ -6592,14 +6580,23 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
- case NodeTypePtrDeref:
- return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval);
+ case NodeTypePtrDeref: {
+ assert(node->type == NodeTypePtrDeref);
+ AstNode *expr_node = node->data.ptr_deref_expr.target;
+ IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
+ if (value == irb->codegen->invalid_instruction)
+ return value;
+
+ return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
+ }
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval);
case NodeTypeArrayType:
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval);
+ case NodeTypePointerType:
+ return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval);
case NodeTypePromiseType:
return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval);
case NodeTypeStringLiteral:
@@ -8961,6 +8958,7 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio
ConstExprValue *pointee, TypeTableEntry *pointee_type,
ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align)
{
+ // TODO remove this special case for types
if (pointee_type->id == TypeTableEntryIdMetaType) {
TypeTableEntry *type_entry = pointee->data.x_type;
if (type_entry->id == TypeTableEntryIdUnreachable) {
@@ -18778,11 +18776,16 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
return usize;
}
-static TypeTableEntry *ir_analyze_instruction_ptr_type_of(IrAnalyze *ira, IrInstructionPtrTypeOf *instruction) {
+static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) {
TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other);
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
+ if (child_type->id == TypeTableEntryIdUnreachable) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
uint32_t align_bytes;
if (instruction->align_value != nullptr) {
if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes))
@@ -19606,8 +19609,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_type_id(ira, (IrInstructionTypeId *)instruction);
case IrInstructionIdSetEvalBranchQuota:
return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstructionSetEvalBranchQuota *)instruction);
- case IrInstructionIdPtrTypeOf:
- return ir_analyze_instruction_ptr_type_of(ira, (IrInstructionPtrTypeOf *)instruction);
+ case IrInstructionIdPtrType:
+ return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction);
case IrInstructionIdAlignCast:
return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction);
case IrInstructionIdOpaqueType:
@@ -19783,7 +19786,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdPanic:
case IrInstructionIdSetEvalBranchQuota:
- case IrInstructionIdPtrTypeOf:
+ case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
case IrInstructionIdCancel:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 9678120f1d..3c177a8bbf 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -921,7 +921,7 @@ static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCas
fprintf(irp->f, ")");
}
-static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instruction) {
+static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
fprintf(irp->f, "&");
if (instruction->align_value != nullptr) {
fprintf(irp->f, "align(");
@@ -1527,8 +1527,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdCanImplicitCast:
ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction);
break;
- case IrInstructionIdPtrTypeOf:
- ir_print_ptr_type_of(irp, (IrInstructionPtrTypeOf *)instruction);
+ case IrInstructionIdPtrType:
+ ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction);
break;
case IrInstructionIdDeclRef:
ir_print_decl_ref(irp, (IrInstructionDeclRef *)instruction);
diff --git a/src/parser.cpp b/src/parser.cpp
index 4763d3b987..ef390a3a2e 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1167,20 +1167,19 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdTilde: return PrefixOpBinNot;
case TokenIdMaybe: return PrefixOpMaybe;
case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe;
+ case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
}
-static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
- Token *ampersand_tok = ast_eat_token(pc, token_index, TokenIdAmpersand);
-
- AstNode *node = ast_create_node(pc, NodeTypeAddrOfExpr, ampersand_tok);
+static AstNode *ast_parse_pointer_type(ParseContext *pc, size_t *token_index, Token *star_tok) {
+ AstNode *node = ast_create_node(pc, NodeTypePointerType, star_tok);
Token *token = &pc->tokens->at(*token_index);
if (token->id == TokenIdKeywordAlign) {
*token_index += 1;
ast_eat_token(pc, token_index, TokenIdLParen);
- node->data.addr_of_expr.align_expr = ast_parse_expression(pc, token_index, true);
+ node->data.pointer_type.align_expr = ast_parse_expression(pc, token_index, true);
token = &pc->tokens->at(*token_index);
if (token->id == TokenIdColon) {
@@ -1189,24 +1188,24 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
ast_eat_token(pc, token_index, TokenIdColon);
Token *bit_offset_end_tok = ast_eat_token(pc, token_index, TokenIdIntLiteral);
- node->data.addr_of_expr.bit_offset_start = token_bigint(bit_offset_start_tok);
- node->data.addr_of_expr.bit_offset_end = token_bigint(bit_offset_end_tok);
+ node->data.pointer_type.bit_offset_start = token_bigint(bit_offset_start_tok);
+ node->data.pointer_type.bit_offset_end = token_bigint(bit_offset_end_tok);
}
ast_eat_token(pc, token_index, TokenIdRParen);
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordConst) {
*token_index += 1;
- node->data.addr_of_expr.is_const = true;
+ node->data.pointer_type.is_const = true;
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordVolatile) {
*token_index += 1;
- node->data.addr_of_expr.is_volatile = true;
+ node->data.pointer_type.is_volatile = true;
}
- node->data.addr_of_expr.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
+ node->data.pointer_type.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
return node;
}
@@ -1216,8 +1215,17 @@ PrefixOp = "!" | "-" | "~" | ("*" option("align" "(" Expression option(":" Integ
*/
static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdAmpersand) {
- return ast_parse_addr_of(pc, token_index);
+ if (token->id == TokenIdStar) {
+ *token_index += 1;
+ return ast_parse_pointer_type(pc, token_index, token);
+ }
+ if (token->id == TokenIdStarStar) {
+ *token_index += 1;
+ AstNode *child_node = ast_parse_pointer_type(pc, token_index, token);
+ child_node->column += 1;
+ AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token);
+ parent_node->data.pointer_type.op_expr = child_node;
+ return parent_node;
}
if (token->id == TokenIdKeywordTry) {
return ast_parse_try_expr(pc, token_index);
@@ -1234,13 +1242,12 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index,
AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, token);
- AstNode *parent_node = node;
AstNode *prefix_op_expr = ast_parse_error_set_expr(pc, token_index, true);
node->data.prefix_op_expr.primary_expr = prefix_op_expr;
node->data.prefix_op_expr.prefix_op = prefix_op;
- return parent_node;
+ return node;
}
@@ -3121,9 +3128,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorType:
// none
break;
- case NodeTypeAddrOfExpr:
- visit_field(&node->data.addr_of_expr.align_expr, visit, context);
- visit_field(&node->data.addr_of_expr.op_expr, visit, context);
+ case NodeTypePointerType:
+ visit_field(&node->data.pointer_type.align_expr, visit, context);
+ visit_field(&node->data.pointer_type.op_expr, visit, context);
break;
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index 50ff073008..db541d34f3 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -276,11 +276,18 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod
node);
}
-static AstNode *trans_create_node_addr_of(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
- AstNode *node = trans_create_node(c, NodeTypeAddrOfExpr);
- node->data.addr_of_expr.is_const = is_const;
- node->data.addr_of_expr.is_volatile = is_volatile;
- node->data.addr_of_expr.op_expr = child_node;
+static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypePointerType);
+ node->data.pointer_type.is_const = is_const;
+ node->data.pointer_type.is_volatile = is_volatile;
+ node->data.pointer_type.op_expr = child_node;
+ return node;
+}
+
+static AstNode *trans_create_node_addr_of(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr);
+ node->data.prefix_op_expr.prefix_op = PrefixOpAddrOf;
+ node->data.prefix_op_expr.primary_expr = child_node;
return node;
}
@@ -848,7 +855,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
}
- AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
+ AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_node);
return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
}
@@ -1033,7 +1040,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
emit_warning(c, source_loc, "unresolved array element type");
return nullptr;
}
- AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
+ AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_type_node);
return pointer_node;
}
@@ -1402,7 +1409,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
- AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
+ AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@@ -1476,7 +1483,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used,
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
- AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
+ AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@@ -1813,7 +1820,7 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
- AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
+ AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@@ -1868,7 +1875,7 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
- AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
+ AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@@ -1917,7 +1924,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
AstNode *value_node = trans_expr(c, result_used, scope, stmt->getSubExpr(), TransLValue);
if (value_node == nullptr)
return value_node;
- return trans_create_node_addr_of(c, false, false, value_node);
+ return trans_create_node_addr_of(c, value_node);
}
case UO_Deref:
{
@@ -4441,7 +4448,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t
} else if (first_tok->id == CTokIdAsterisk) {
*tok_i += 1;
- node = trans_create_node_addr_of(c, false, false, node);
+ node = trans_create_node_ptr_type(c, false, false, node);
} else {
return node;
}
diff --git a/std/array_list.zig b/std/array_list.zig
index b315194c33..07a1db6451 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -17,10 +17,10 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
/// you uninitialized memory.
items: []align(A) T,
len: usize,
- allocator: &Allocator,
+ allocator: *Allocator,
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn init(allocator: &Allocator) Self {
+ pub fn init(allocator: *Allocator) Self {
return Self{
.items = []align(A) T{},
.len = 0,
@@ -28,30 +28,30 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
};
}
- pub fn deinit(l: &const Self) void {
+ pub fn deinit(l: *const Self) void {
l.allocator.free(l.items);
}
- pub fn toSlice(l: &const Self) []align(A) T {
+ pub fn toSlice(l: *const Self) []align(A) T {
return l.items[0..l.len];
}
- pub fn toSliceConst(l: &const Self) []align(A) const T {
+ pub fn toSliceConst(l: *const Self) []align(A) const T {
return l.items[0..l.len];
}
- pub fn at(l: &const Self, n: usize) T {
+ pub fn at(l: *const Self, n: usize) T {
return l.toSliceConst()[n];
}
- pub fn count(self: &const Self) usize {
+ pub fn count(self: *const Self) usize {
return self.len;
}
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn fromOwnedSlice(allocator: &Allocator, slice: []align(A) T) Self {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self {
return Self{
.items = slice,
.len = slice.len,
@@ -60,14 +60,14 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSlice(self: &Self) []align(A) T {
+ pub fn toOwnedSlice(self: *Self) []align(A) T {
const allocator = self.allocator;
const result = allocator.alignedShrink(T, A, self.items, self.len);
self.* = init(allocator);
return result;
}
- pub fn insert(l: &Self, n: usize, item: &const T) !void {
+ pub fn insert(l: *Self, n: usize, item: *const T) !void {
try l.ensureCapacity(l.len + 1);
l.len += 1;
@@ -75,7 +75,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
l.items[n] = item.*;
}
- pub fn insertSlice(l: &Self, n: usize, items: []align(A) const T) !void {
+ pub fn insertSlice(l: *Self, n: usize, items: []align(A) const T) !void {
try l.ensureCapacity(l.len + items.len);
l.len += items.len;
@@ -83,28 +83,28 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
mem.copy(T, l.items[n .. n + items.len], items);
}
- pub fn append(l: &Self, item: &const T) !void {
+ pub fn append(l: *Self, item: *const T) !void {
const new_item_ptr = try l.addOne();
new_item_ptr.* = item.*;
}
- pub fn appendSlice(l: &Self, items: []align(A) const T) !void {
+ pub fn appendSlice(l: *Self, items: []align(A) const T) !void {
try l.ensureCapacity(l.len + items.len);
mem.copy(T, l.items[l.len..], items);
l.len += items.len;
}
- pub fn resize(l: &Self, new_len: usize) !void {
+ pub fn resize(l: *Self, new_len: usize) !void {
try l.ensureCapacity(new_len);
l.len = new_len;
}
- pub fn shrink(l: &Self, new_len: usize) void {
+ pub fn shrink(l: *Self, new_len: usize) void {
assert(new_len <= l.len);
l.len = new_len;
}
- pub fn ensureCapacity(l: &Self, new_capacity: usize) !void {
+ pub fn ensureCapacity(l: *Self, new_capacity: usize) !void {
var better_capacity = l.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
@@ -114,7 +114,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity);
}
- pub fn addOne(l: &Self) !&T {
+ pub fn addOne(l: *Self) !*T {
const new_length = l.len + 1;
try l.ensureCapacity(new_length);
const result = &l.items[l.len];
@@ -122,34 +122,34 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return result;
}
- pub fn pop(self: &Self) T {
+ pub fn pop(self: *Self) T {
self.len -= 1;
return self.items[self.len];
}
- pub fn popOrNull(self: &Self) ?T {
+ pub fn popOrNull(self: *Self) ?T {
if (self.len == 0) return null;
return self.pop();
}
pub const Iterator = struct {
- list: &const Self,
+ list: *const Self,
// how many items have we returned
count: usize,
- pub fn next(it: &Iterator) ?T {
+ pub fn next(it: *Iterator) ?T {
if (it.count >= it.list.len) return null;
const val = it.list.at(it.count);
it.count += 1;
return val;
}
- pub fn reset(it: &Iterator) void {
+ pub fn reset(it: *Iterator) void {
it.count = 0;
}
};
- pub fn iterator(self: &const Self) Iterator {
+ pub fn iterator(self: *const Self) Iterator {
return Iterator{
.list = self,
.count = 0,
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 35180da8d1..142c958173 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -5,36 +5,36 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Queue(comptime T: type) type {
return struct {
- head: &Node,
- tail: &Node,
+ head: *Node,
+ tail: *Node,
root: Node,
pub const Self = this;
pub const Node = struct {
- next: ?&Node,
+ next: ?*Node,
data: T,
};
// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
- pub fn init(self: &Self) void {
+ pub fn init(self: *Self) void {
self.root.next = null;
self.head = &self.root;
self.tail = &self.root;
}
- pub fn put(self: &Self, node: &Node) void {
+ pub fn put(self: *Self, node: *Node) void {
node.next = null;
- const tail = @atomicRmw(&Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
- _ = @atomicRmw(?&Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
}
- pub fn get(self: &Self) ?&Node {
- var head = @atomicLoad(&Node, &self.head, AtomicOrder.SeqCst);
+ pub fn get(self: *Self) ?*Node {
+ var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
const node = head.next ?? return null;
- head = @cmpxchgWeak(&Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
+ head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
}
}
};
@@ -42,8 +42,8 @@ pub fn Queue(comptime T: type) type {
const std = @import("std");
const Context = struct {
- allocator: &std.mem.Allocator,
- queue: &Queue(i32),
+ allocator: *std.mem.Allocator,
+ queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -79,11 +79,11 @@ test "std.atomic.queue" {
.get_count = 0,
};
- var putters: [put_thread_count]&std.os.Thread = undefined;
+ var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
- var getters: [put_thread_count]&std.os.Thread = undefined;
+ var getters: [put_thread_count]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@@ -98,7 +98,7 @@ test "std.atomic.queue" {
std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
}
-fn startPuts(ctx: &Context) u8 {
+fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
@@ -112,7 +112,7 @@ fn startPuts(ctx: &Context) u8 {
return 0;
}
-fn startGets(ctx: &Context) u8 {
+fn startGets(ctx: *Context) u8 {
while (true) {
while (ctx.queue.get()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 400a1a3c4f..15611188d2 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -4,12 +4,12 @@ const AtomicOrder = builtin.AtomicOrder;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Stack(comptime T: type) type {
return struct {
- root: ?&Node,
+ root: ?*Node,
pub const Self = this;
pub const Node = struct {
- next: ?&Node,
+ next: ?*Node,
data: T,
};
@@ -19,36 +19,36 @@ pub fn Stack(comptime T: type) type {
/// push operation, but only if you are the first item in the stack. if you did not succeed in
/// being the first item in the stack, returns the other item that was there.
- pub fn pushFirst(self: &Self, node: &Node) ?&Node {
+ pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
- return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+ return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
}
- pub fn push(self: &Self, node: &Node) void {
- var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
+ pub fn push(self: *Self, node: *Node) void {
+ var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
- root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
+ root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
}
}
- pub fn pop(self: &Self) ?&Node {
- var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
+ pub fn pop(self: *Self) ?*Node {
+ var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
- root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
+ root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
}
}
- pub fn isEmpty(self: &Self) bool {
- return @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst) == null;
+ pub fn isEmpty(self: *Self) bool {
+ return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
}
};
}
const std = @import("std");
const Context = struct {
- allocator: &std.mem.Allocator,
- stack: &Stack(i32),
+ allocator: *std.mem.Allocator,
+ stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -82,11 +82,11 @@ test "std.atomic.stack" {
.get_count = 0,
};
- var putters: [put_thread_count]&std.os.Thread = undefined;
+ var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
- var getters: [put_thread_count]&std.os.Thread = undefined;
+ var getters: [put_thread_count]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@@ -101,7 +101,7 @@ test "std.atomic.stack" {
std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
}
-fn startPuts(ctx: &Context) u8 {
+fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
@@ -115,7 +115,7 @@ fn startPuts(ctx: &Context) u8 {
return 0;
}
-fn startGets(ctx: &Context) u8 {
+fn startGets(ctx: *Context) u8 {
while (true) {
while (ctx.stack.pop()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
diff --git a/std/base64.zig b/std/base64.zig
index 204628a405..d27bcbd201 100644
--- a/std/base64.zig
+++ b/std/base64.zig
@@ -32,7 +32,7 @@ pub const Base64Encoder = struct {
}
/// dest.len must be what you get from ::calcSize.
- pub fn encode(encoder: &const Base64Encoder, dest: []u8, source: []const u8) void {
+ pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) void {
assert(dest.len == Base64Encoder.calcSize(source.len));
var i: usize = 0;
@@ -107,7 +107,7 @@ pub const Base64Decoder = struct {
}
/// If the encoded buffer is detected to be invalid, returns error.InvalidPadding.
- pub fn calcSize(decoder: &const Base64Decoder, source: []const u8) !usize {
+ pub fn calcSize(decoder: *const Base64Decoder, source: []const u8) !usize {
if (source.len % 4 != 0) return error.InvalidPadding;
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
@@ -115,7 +115,7 @@ pub const Base64Decoder = struct {
/// dest.len must be what you get from ::calcSize.
/// invalid characters result in error.InvalidCharacter.
/// invalid padding results in error.InvalidPadding.
- pub fn decode(decoder: &const Base64Decoder, dest: []u8, source: []const u8) !void {
+ pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) !void {
assert(dest.len == (decoder.calcSize(source) catch unreachable));
assert(source.len % 4 == 0);
@@ -181,7 +181,7 @@ pub const Base64DecoderWithIgnore = struct {
/// Invalid padding results in error.InvalidPadding.
/// Decoding more data than can fit in dest results in error.OutputTooSmall. See also ::calcSizeUpperBound.
/// Returns the number of bytes writen to dest.
- pub fn decode(decoder_with_ignore: &const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
+ pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
const decoder = &decoder_with_ignore.decoder;
var src_cursor: usize = 0;
@@ -290,13 +290,13 @@ pub const Base64DecoderUnsafe = struct {
}
/// The source buffer must be valid.
- pub fn calcSize(decoder: &const Base64DecoderUnsafe, source: []const u8) usize {
+ pub fn calcSize(decoder: *const Base64DecoderUnsafe, source: []const u8) usize {
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
/// dest.len must be what you get from ::calcDecodedSizeExactUnsafe.
/// invalid characters or padding will result in undefined values.
- pub fn decode(decoder: &const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
+ pub fn decode(decoder: *const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
assert(dest.len == decoder.calcSize(source));
var src_index: usize = 0;
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 930fc36a78..22d821ae7b 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -11,12 +11,12 @@ pub const BufMap = struct {
const BufMapHashMap = HashMap([]const u8, []const u8, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(allocator: &Allocator) BufMap {
+ pub fn init(allocator: *Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
- pub fn deinit(self: &const BufMap) void {
+ pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() ?? break;
@@ -27,7 +27,7 @@ pub const BufMap = struct {
self.hash_map.deinit();
}
- pub fn set(self: &BufMap, key: []const u8, value: []const u8) !void {
+ pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void {
self.delete(key);
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@@ -36,30 +36,30 @@ pub const BufMap = struct {
_ = try self.hash_map.put(key_copy, value_copy);
}
- pub fn get(self: &const BufMap, key: []const u8) ?[]const u8 {
+ pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
const entry = self.hash_map.get(key) ?? return null;
return entry.value;
}
- pub fn delete(self: &BufMap, key: []const u8) void {
+ pub fn delete(self: *BufMap, key: []const u8) void {
const entry = self.hash_map.remove(key) ?? return;
self.free(entry.key);
self.free(entry.value);
}
- pub fn count(self: &const BufMap) usize {
+ pub fn count(self: *const BufMap) usize {
return self.hash_map.count();
}
- pub fn iterator(self: &const BufMap) BufMapHashMap.Iterator {
+ pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
- fn free(self: &const BufMap, value: []const u8) void {
+ fn free(self: *const BufMap, value: []const u8) void {
self.hash_map.allocator.free(value);
}
- fn copy(self: &const BufMap, value: []const u8) ![]const u8 {
+ fn copy(self: *const BufMap, value: []const u8) ![]const u8 {
return mem.dupe(self.hash_map.allocator, u8, value);
}
};
diff --git a/std/buf_set.zig b/std/buf_set.zig
index c5a80e16fb..03a050ed8b 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -9,12 +9,12 @@ pub const BufSet = struct {
const BufSetHashMap = HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(a: &Allocator) BufSet {
+ pub fn init(a: *Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
- pub fn deinit(self: &const BufSet) void {
+ pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() ?? break;
@@ -24,7 +24,7 @@ pub const BufSet = struct {
self.hash_map.deinit();
}
- pub fn put(self: &BufSet, key: []const u8) !void {
+ pub fn put(self: *BufSet, key: []const u8) !void {
if (self.hash_map.get(key) == null) {
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@@ -32,28 +32,28 @@ pub const BufSet = struct {
}
}
- pub fn delete(self: &BufSet, key: []const u8) void {
+ pub fn delete(self: *BufSet, key: []const u8) void {
const entry = self.hash_map.remove(key) ?? return;
self.free(entry.key);
}
- pub fn count(self: &const BufSet) usize {
+ pub fn count(self: *const BufSet) usize {
return self.hash_map.count();
}
- pub fn iterator(self: &const BufSet) BufSetHashMap.Iterator {
+ pub fn iterator(self: *const BufSet) BufSetHashMap.Iterator {
return self.hash_map.iterator();
}
- pub fn allocator(self: &const BufSet) &Allocator {
+ pub fn allocator(self: *const BufSet) *Allocator {
return self.hash_map.allocator;
}
- fn free(self: &const BufSet, value: []const u8) void {
+ fn free(self: *const BufSet, value: []const u8) void {
self.hash_map.allocator.free(value);
}
- fn copy(self: &const BufSet, value: []const u8) ![]const u8 {
+ fn copy(self: *const BufSet, value: []const u8) ![]const u8 {
const result = try self.hash_map.allocator.alloc(u8, value.len);
mem.copy(u8, result, value);
return result;
diff --git a/std/buffer.zig b/std/buffer.zig
index 90d63719e3..305746e183 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -12,14 +12,14 @@ pub const Buffer = struct {
list: ArrayList(u8),
/// Must deinitialize with deinit.
- pub fn init(allocator: &Allocator, m: []const u8) !Buffer {
+ pub fn init(allocator: *Allocator, m: []const u8) !Buffer {
var self = try initSize(allocator, m.len);
mem.copy(u8, self.list.items, m);
return self;
}
/// Must deinitialize with deinit.
- pub fn initSize(allocator: &Allocator, size: usize) !Buffer {
+ pub fn initSize(allocator: *Allocator, size: usize) !Buffer {
var self = initNull(allocator);
try self.resize(size);
return self;
@@ -30,19 +30,19 @@ pub const Buffer = struct {
/// * ::replaceContents
/// * ::replaceContentsBuffer
/// * ::resize
- pub fn initNull(allocator: &Allocator) Buffer {
+ pub fn initNull(allocator: *Allocator) Buffer {
return Buffer{ .list = ArrayList(u8).init(allocator) };
}
/// Must deinitialize with deinit.
- pub fn initFromBuffer(buffer: &const Buffer) !Buffer {
+ pub fn initFromBuffer(buffer: *const Buffer) !Buffer {
return Buffer.init(buffer.list.allocator, buffer.toSliceConst());
}
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
- pub fn fromOwnedSlice(allocator: &Allocator, slice: []u8) Buffer {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) Buffer {
var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
self.list.append(0);
return self;
@@ -50,79 +50,79 @@ pub const Buffer = struct {
/// The caller owns the returned memory. The Buffer becomes null and
/// is safe to `deinit`.
- pub fn toOwnedSlice(self: &Buffer) []u8 {
+ pub fn toOwnedSlice(self: *Buffer) []u8 {
const allocator = self.list.allocator;
const result = allocator.shrink(u8, self.list.items, self.len());
self.* = initNull(allocator);
return result;
}
- pub fn deinit(self: &Buffer) void {
+ pub fn deinit(self: *Buffer) void {
self.list.deinit();
}
- pub fn toSlice(self: &const Buffer) []u8 {
+ pub fn toSlice(self: *const Buffer) []u8 {
return self.list.toSlice()[0..self.len()];
}
- pub fn toSliceConst(self: &const Buffer) []const u8 {
+ pub fn toSliceConst(self: *const Buffer) []const u8 {
return self.list.toSliceConst()[0..self.len()];
}
- pub fn shrink(self: &Buffer, new_len: usize) void {
+ pub fn shrink(self: *Buffer, new_len: usize) void {
assert(new_len <= self.len());
self.list.shrink(new_len + 1);
self.list.items[self.len()] = 0;
}
- pub fn resize(self: &Buffer, new_len: usize) !void {
+ pub fn resize(self: *Buffer, new_len: usize) !void {
try self.list.resize(new_len + 1);
self.list.items[self.len()] = 0;
}
- pub fn isNull(self: &const Buffer) bool {
+ pub fn isNull(self: *const Buffer) bool {
return self.list.len == 0;
}
- pub fn len(self: &const Buffer) usize {
+ pub fn len(self: *const Buffer) usize {
return self.list.len - 1;
}
- pub fn append(self: &Buffer, m: []const u8) !void {
+ pub fn append(self: *Buffer, m: []const u8) !void {
const old_len = self.len();
try self.resize(old_len + m.len);
mem.copy(u8, self.list.toSlice()[old_len..], m);
}
- pub fn appendByte(self: &Buffer, byte: u8) !void {
+ pub fn appendByte(self: *Buffer, byte: u8) !void {
const old_len = self.len();
try self.resize(old_len + 1);
self.list.toSlice()[old_len] = byte;
}
- pub fn eql(self: &const Buffer, m: []const u8) bool {
+ pub fn eql(self: *const Buffer, m: []const u8) bool {
return mem.eql(u8, self.toSliceConst(), m);
}
- pub fn startsWith(self: &const Buffer, m: []const u8) bool {
+ pub fn startsWith(self: *const Buffer, m: []const u8) bool {
if (self.len() < m.len) return false;
return mem.eql(u8, self.list.items[0..m.len], m);
}
- pub fn endsWith(self: &const Buffer, m: []const u8) bool {
+ pub fn endsWith(self: *const Buffer, m: []const u8) bool {
const l = self.len();
if (l < m.len) return false;
const start = l - m.len;
return mem.eql(u8, self.list.items[start..l], m);
}
- pub fn replaceContents(self: &const Buffer, m: []const u8) !void {
+ pub fn replaceContents(self: *const Buffer, m: []const u8) !void {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
/// For passing to C functions.
- pub fn ptr(self: &const Buffer) &u8 {
+ pub fn ptr(self: *const Buffer) *u8 {
return self.list.items.ptr;
}
};
diff --git a/std/build.zig b/std/build.zig
index 9a6e17f728..fed02e0815 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -20,7 +20,7 @@ pub const Builder = struct {
install_tls: TopLevelStep,
have_uninstall_step: bool,
have_install_step: bool,
- allocator: &Allocator,
+ allocator: *Allocator,
lib_paths: ArrayList([]const u8),
include_paths: ArrayList([]const u8),
rpaths: ArrayList([]const u8),
@@ -36,9 +36,9 @@ pub const Builder = struct {
verbose_cimport: bool,
invalid_user_input: bool,
zig_exe: []const u8,
- default_step: &Step,
+ default_step: *Step,
env_map: BufMap,
- top_level_steps: ArrayList(&TopLevelStep),
+ top_level_steps: ArrayList(*TopLevelStep),
prefix: []const u8,
search_prefixes: ArrayList([]const u8),
lib_dir: []const u8,
@@ -82,7 +82,7 @@ pub const Builder = struct {
description: []const u8,
};
- pub fn init(allocator: &Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
+ pub fn init(allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
var self = Builder{
.zig_exe = zig_exe,
.build_root = build_root,
@@ -102,7 +102,7 @@ pub const Builder = struct {
.user_input_options = UserInputOptionsMap.init(allocator),
.available_options_map = AvailableOptionsMap.init(allocator),
.available_options_list = ArrayList(AvailableOption).init(allocator),
- .top_level_steps = ArrayList(&TopLevelStep).init(allocator),
+ .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
.default_step = undefined,
.env_map = os.getEnvMap(allocator) catch unreachable,
.prefix = undefined,
@@ -127,7 +127,7 @@ pub const Builder = struct {
return self;
}
- pub fn deinit(self: &Builder) void {
+ pub fn deinit(self: *Builder) void {
self.lib_paths.deinit();
self.include_paths.deinit();
self.rpaths.deinit();
@@ -135,81 +135,81 @@ pub const Builder = struct {
self.top_level_steps.deinit();
}
- pub fn setInstallPrefix(self: &Builder, maybe_prefix: ?[]const u8) void {
+ pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
- pub fn addExecutable(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createExecutable(self, name, root_src);
}
- pub fn addObject(self: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
+ pub fn addObject(self: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
return LibExeObjStep.createObject(self, name, root_src);
}
- pub fn addSharedLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep {
+ pub fn addSharedLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createSharedLibrary(self, name, root_src, ver);
}
- pub fn addStaticLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createStaticLibrary(self, name, root_src);
}
- pub fn addTest(self: &Builder, root_src: []const u8) &TestStep {
+ pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
const test_step = self.allocator.create(TestStep) catch unreachable;
test_step.* = TestStep.init(self, root_src);
return test_step;
}
- pub fn addAssemble(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const obj_step = LibExeObjStep.createObject(self, name, null);
obj_step.addAssemblyFile(src);
return obj_step;
}
- pub fn addCStaticLibrary(self: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn addCStaticLibrary(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCStaticLibrary(self, name);
}
- pub fn addCSharedLibrary(self: &Builder, name: []const u8, ver: &const Version) &LibExeObjStep {
+ pub fn addCSharedLibrary(self: *Builder, name: []const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createCSharedLibrary(self, name, ver);
}
- pub fn addCExecutable(self: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn addCExecutable(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCExecutable(self, name);
}
- pub fn addCObject(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn addCObject(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
return LibExeObjStep.createCObject(self, name, src);
}
/// ::argv is copied.
- pub fn addCommand(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep {
+ pub fn addCommand(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
return CommandStep.create(self, cwd, env_map, argv);
}
- pub fn addWriteFile(self: &Builder, file_path: []const u8, data: []const u8) &WriteFileStep {
+ pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
write_file_step.* = WriteFileStep.init(self, file_path, data);
return write_file_step;
}
- pub fn addLog(self: &Builder, comptime format: []const u8, args: ...) &LogStep {
+ pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
const log_step = self.allocator.create(LogStep) catch unreachable;
log_step.* = LogStep.init(self, data);
return log_step;
}
- pub fn addRemoveDirTree(self: &Builder, dir_path: []const u8) &RemoveDirStep {
+ pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
remove_dir_step.* = RemoveDirStep.init(self, dir_path);
return remove_dir_step;
}
- pub fn version(self: &const Builder, major: u32, minor: u32, patch: u32) Version {
+ pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) Version {
return Version{
.major = major,
.minor = minor,
@@ -217,20 +217,20 @@ pub const Builder = struct {
};
}
- pub fn addCIncludePath(self: &Builder, path: []const u8) void {
+ pub fn addCIncludePath(self: *Builder, path: []const u8) void {
self.include_paths.append(path) catch unreachable;
}
- pub fn addRPath(self: &Builder, path: []const u8) void {
+ pub fn addRPath(self: *Builder, path: []const u8) void {
self.rpaths.append(path) catch unreachable;
}
- pub fn addLibPath(self: &Builder, path: []const u8) void {
+ pub fn addLibPath(self: *Builder, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
- pub fn make(self: &Builder, step_names: []const []const u8) !void {
- var wanted_steps = ArrayList(&Step).init(self.allocator);
+ pub fn make(self: *Builder, step_names: []const []const u8) !void {
+ var wanted_steps = ArrayList(*Step).init(self.allocator);
defer wanted_steps.deinit();
if (step_names.len == 0) {
@@ -247,7 +247,7 @@ pub const Builder = struct {
}
}
- pub fn getInstallStep(self: &Builder) &Step {
+ pub fn getInstallStep(self: *Builder) *Step {
if (self.have_install_step) return &self.install_tls.step;
self.top_level_steps.append(&self.install_tls) catch unreachable;
@@ -255,7 +255,7 @@ pub const Builder = struct {
return &self.install_tls.step;
}
- pub fn getUninstallStep(self: &Builder) &Step {
+ pub fn getUninstallStep(self: *Builder) *Step {
if (self.have_uninstall_step) return &self.uninstall_tls.step;
self.top_level_steps.append(&self.uninstall_tls) catch unreachable;
@@ -263,7 +263,7 @@ pub const Builder = struct {
return &self.uninstall_tls.step;
}
- fn makeUninstall(uninstall_step: &Step) error!void {
+ fn makeUninstall(uninstall_step: *Step) error!void {
const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls);
@@ -277,7 +277,7 @@ pub const Builder = struct {
// TODO remove empty directories
}
- fn makeOneStep(self: &Builder, s: &Step) error!void {
+ fn makeOneStep(self: *Builder, s: *Step) error!void {
if (s.loop_flag) {
warn("Dependency loop detected:\n {}\n", s.name);
return error.DependencyLoopDetected;
@@ -298,7 +298,7 @@ pub const Builder = struct {
try s.make();
}
- fn getTopLevelStepByName(self: &Builder, name: []const u8) !&Step {
+ fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step {
for (self.top_level_steps.toSliceConst()) |top_level_step| {
if (mem.eql(u8, top_level_step.step.name, name)) {
return &top_level_step.step;
@@ -308,7 +308,7 @@ pub const Builder = struct {
return error.InvalidStepName;
}
- fn processNixOSEnvVars(self: &Builder) void {
+ fn processNixOSEnvVars(self: *Builder) void {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
@@ -350,7 +350,7 @@ pub const Builder = struct {
}
}
- pub fn option(self: &Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
+ pub fn option(self: *Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
const type_id = comptime typeToEnum(T);
const available_option = AvailableOption{
.name = name,
@@ -403,7 +403,7 @@ pub const Builder = struct {
}
}
- pub fn step(self: &Builder, name: []const u8, description: []const u8) &Step {
+ pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
const step_info = self.allocator.create(TopLevelStep) catch unreachable;
step_info.* = TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
@@ -413,7 +413,7 @@ pub const Builder = struct {
return &step_info.step;
}
- pub fn standardReleaseOptions(self: &Builder) builtin.Mode {
+ pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
@@ -429,7 +429,7 @@ pub const Builder = struct {
return mode;
}
- pub fn addUserInputOption(self: &Builder, name: []const u8, value: []const u8) bool {
+ pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool {
if (self.user_input_options.put(name, UserInputOption{
.name = name,
.value = UserValue{ .Scalar = value },
@@ -466,7 +466,7 @@ pub const Builder = struct {
return false;
}
- pub fn addUserInputFlag(self: &Builder, name: []const u8) bool {
+ pub fn addUserInputFlag(self: *Builder, name: []const u8) bool {
if (self.user_input_options.put(name, UserInputOption{
.name = name,
.value = UserValue{ .Flag = {} },
@@ -500,7 +500,7 @@ pub const Builder = struct {
};
}
- fn markInvalidUserInput(self: &Builder) void {
+ fn markInvalidUserInput(self: *Builder) void {
self.invalid_user_input = true;
}
@@ -514,7 +514,7 @@ pub const Builder = struct {
};
}
- pub fn validateUserInputDidItFail(self: &Builder) bool {
+ pub fn validateUserInputDidItFail(self: *Builder) bool {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
@@ -528,7 +528,7 @@ pub const Builder = struct {
return self.invalid_user_input;
}
- fn spawnChild(self: &Builder, argv: []const []const u8) !void {
+ fn spawnChild(self: *Builder, argv: []const []const u8) !void {
return self.spawnChildEnvMap(null, &self.env_map, argv);
}
@@ -540,7 +540,7 @@ pub const Builder = struct {
warn("\n");
}
- fn spawnChildEnvMap(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) !void {
+ fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) !void {
if (self.verbose) {
printCmd(cwd, argv);
}
@@ -573,28 +573,28 @@ pub const Builder = struct {
}
}
- pub fn makePath(self: &Builder, path: []const u8) !void {
+ pub fn makePath(self: *Builder, path: []const u8) !void {
os.makePath(self.allocator, self.pathFromRoot(path)) catch |err| {
warn("Unable to create path {}: {}\n", path, @errorName(err));
return err;
};
}
- pub fn installArtifact(self: &Builder, artifact: &LibExeObjStep) void {
+ pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void {
self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
}
- pub fn addInstallArtifact(self: &Builder, artifact: &LibExeObjStep) &InstallArtifactStep {
+ pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep {
return InstallArtifactStep.create(self, artifact);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
- pub fn installFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) void {
+ pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
self.getInstallStep().dependOn(&self.addInstallFile(src_path, dest_rel_path).step);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
- pub fn addInstallFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) &InstallFileStep {
+ pub fn addInstallFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
@@ -603,16 +603,16 @@ pub const Builder = struct {
return install_step;
}
- pub fn pushInstalledFile(self: &Builder, full_path: []const u8) void {
+ pub fn pushInstalledFile(self: *Builder, full_path: []const u8) void {
_ = self.getUninstallStep();
self.installed_files.append(full_path) catch unreachable;
}
- fn copyFile(self: &Builder, source_path: []const u8, dest_path: []const u8) !void {
+ fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
return self.copyFileMode(source_path, dest_path, os.default_file_mode);
}
- fn copyFileMode(self: &Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
+ fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path);
}
@@ -629,15 +629,15 @@ pub const Builder = struct {
};
}
- fn pathFromRoot(self: &Builder, rel_path: []const u8) []u8 {
+ fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 {
return os.path.resolve(self.allocator, self.build_root, rel_path) catch unreachable;
}
- pub fn fmt(self: &Builder, comptime format: []const u8, args: ...) []u8 {
+ pub fn fmt(self: *Builder, comptime format: []const u8, args: ...) []u8 {
return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
}
- fn getCCExe(self: &Builder) []const u8 {
+ fn getCCExe(self: *Builder) []const u8 {
if (builtin.environ == builtin.Environ.msvc) {
return "cl.exe";
} else {
@@ -645,7 +645,7 @@ pub const Builder = struct {
}
}
- pub fn findProgram(self: &Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
+ pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
// TODO report error for ambiguous situations
const exe_extension = (Target{ .Native = {} }).exeFileExt();
for (self.search_prefixes.toSliceConst()) |search_prefix| {
@@ -693,7 +693,7 @@ pub const Builder = struct {
return error.FileNotFound;
}
- pub fn exec(self: &Builder, argv: []const []const u8) ![]u8 {
+ pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 {
const max_output_size = 100 * 1024;
const result = try os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size);
switch (result.term) {
@@ -715,7 +715,7 @@ pub const Builder = struct {
}
}
- pub fn addSearchPrefix(self: &Builder, search_prefix: []const u8) void {
+ pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void {
self.search_prefixes.append(search_prefix) catch unreachable;
}
};
@@ -736,7 +736,7 @@ pub const Target = union(enum) {
Native: void,
Cross: CrossTarget,
- pub fn oFileExt(self: &const Target) []const u8 {
+ pub fn oFileExt(self: *const Target) []const u8 {
const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
@@ -747,49 +747,49 @@ pub const Target = union(enum) {
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn libFileExt(self: &const Target) []const u8 {
+ pub fn libFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".lib",
else => ".a",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
+ pub fn getOs(self: *const Target) builtin.Os {
return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
};
}
- pub fn wantSharedLibSymLinks(self: &const Target) bool {
+ pub fn wantSharedLibSymLinks(self: *const Target) bool {
return !self.isWindows();
}
};
pub const LibExeObjStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
name: []const u8,
target: Target,
link_libs: BufSet,
@@ -836,56 +836,56 @@ pub const LibExeObjStep = struct {
Obj,
};
- pub fn createSharedLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep {
+ pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
return self;
}
- pub fn createCSharedLibrary(builder: &Builder, name: []const u8, version: &const Version) &LibExeObjStep {
+ pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, version, false);
return self;
}
- pub fn createStaticLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
return self;
}
- pub fn createCStaticLibrary(builder: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
return self;
}
- pub fn createObject(builder: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
+ pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
return self;
}
- pub fn createCObject(builder: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
self.object_src = src;
return self;
}
- pub fn createExecutable(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
return self;
}
- pub fn createCExecutable(builder: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
return self;
}
- fn initExtraArgs(builder: &Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: &const Version) LibExeObjStep {
+ fn initExtraArgs(builder: *Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: *const Version) LibExeObjStep {
var self = LibExeObjStep{
.strip = false,
.builder = builder,
@@ -924,7 +924,7 @@ pub const LibExeObjStep = struct {
return self;
}
- fn initC(builder: &Builder, name: []const u8, kind: Kind, version: &const Version, static: bool) LibExeObjStep {
+ fn initC(builder: *Builder, name: []const u8, kind: Kind, version: *const Version, static: bool) LibExeObjStep {
var self = LibExeObjStep{
.builder = builder,
.name = name,
@@ -964,7 +964,7 @@ pub const LibExeObjStep = struct {
return self;
}
- fn computeOutFileNames(self: &LibExeObjStep) void {
+ fn computeOutFileNames(self: *LibExeObjStep) void {
switch (self.kind) {
Kind.Obj => {
self.out_filename = self.builder.fmt("{}{}", self.name, self.target.oFileExt());
@@ -996,7 +996,7 @@ pub const LibExeObjStep = struct {
}
}
- pub fn setTarget(self: &LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
+ pub fn setTarget(self: *LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
.arch = target_arch,
@@ -1008,16 +1008,16 @@ pub const LibExeObjStep = struct {
}
// TODO respect this in the C args
- pub fn setLinkerScriptPath(self: &LibExeObjStep, path: []const u8) void {
+ pub fn setLinkerScriptPath(self: *LibExeObjStep, path: []const u8) void {
self.linker_script = path;
}
- pub fn linkFramework(self: &LibExeObjStep, framework_name: []const u8) void {
+ pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void {
assert(self.target.isDarwin());
self.frameworks.put(framework_name) catch unreachable;
}
- pub fn linkLibrary(self: &LibExeObjStep, lib: &LibExeObjStep) void {
+ pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void {
assert(self.kind != Kind.Obj);
assert(lib.kind == Kind.Lib);
@@ -1038,26 +1038,26 @@ pub const LibExeObjStep = struct {
}
}
- pub fn linkSystemLibrary(self: &LibExeObjStep, name: []const u8) void {
+ pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void {
assert(self.kind != Kind.Obj);
self.link_libs.put(name) catch unreachable;
}
- pub fn addSourceFile(self: &LibExeObjStep, file: []const u8) void {
+ pub fn addSourceFile(self: *LibExeObjStep, file: []const u8) void {
assert(self.kind != Kind.Obj);
assert(!self.is_zig);
self.source_files.append(file) catch unreachable;
}
- pub fn setVerboseLink(self: &LibExeObjStep, value: bool) void {
+ pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void {
self.verbose_link = value;
}
- pub fn setBuildMode(self: &LibExeObjStep, mode: builtin.Mode) void {
+ pub fn setBuildMode(self: *LibExeObjStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
- pub fn setOutputPath(self: &LibExeObjStep, file_path: []const u8) void {
+ pub fn setOutputPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_path = file_path;
// catch a common mistake
@@ -1066,11 +1066,11 @@ pub const LibExeObjStep = struct {
}
}
- pub fn getOutputPath(self: &LibExeObjStep) []const u8 {
+ pub fn getOutputPath(self: *LibExeObjStep) []const u8 {
return if (self.output_path) |output_path| output_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_filename) catch unreachable;
}
- pub fn setOutputHPath(self: &LibExeObjStep, file_path: []const u8) void {
+ pub fn setOutputHPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_h_path = file_path;
// catch a common mistake
@@ -1079,21 +1079,21 @@ pub const LibExeObjStep = struct {
}
}
- pub fn getOutputHPath(self: &LibExeObjStep) []const u8 {
+ pub fn getOutputHPath(self: *LibExeObjStep) []const u8 {
return if (self.output_h_path) |output_h_path| output_h_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_h_filename) catch unreachable;
}
- pub fn addAssemblyFile(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void {
self.assembly_files.append(path) catch unreachable;
}
- pub fn addObjectFile(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addObjectFile(self: *LibExeObjStep, path: []const u8) void {
assert(self.kind != Kind.Obj);
self.object_files.append(path) catch unreachable;
}
- pub fn addObject(self: &LibExeObjStep, obj: &LibExeObjStep) void {
+ pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void {
assert(obj.kind == Kind.Obj);
assert(self.kind != Kind.Obj);
@@ -1110,15 +1110,15 @@ pub const LibExeObjStep = struct {
self.include_dirs.append(self.builder.cache_root) catch unreachable;
}
- pub fn addIncludeDir(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addIncludeDir(self: *LibExeObjStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
- pub fn addLibPath(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
- pub fn addPackagePath(self: &LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
+ pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
assert(self.is_zig);
self.packages.append(Pkg{
@@ -1127,23 +1127,23 @@ pub const LibExeObjStep = struct {
}) catch unreachable;
}
- pub fn addCompileFlags(self: &LibExeObjStep, flags: []const []const u8) void {
+ pub fn addCompileFlags(self: *LibExeObjStep, flags: []const []const u8) void {
for (flags) |flag| {
self.cflags.append(flag) catch unreachable;
}
}
- pub fn setNoStdLib(self: &LibExeObjStep, disable: bool) void {
+ pub fn setNoStdLib(self: *LibExeObjStep, disable: bool) void {
assert(!self.is_zig);
self.disable_libc = disable;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(LibExeObjStep, "step", step);
return if (self.is_zig) self.makeZig() else self.makeC();
}
- fn makeZig(self: &LibExeObjStep) !void {
+ fn makeZig(self: *LibExeObjStep) !void {
const builder = self.builder;
assert(self.is_zig);
@@ -1309,7 +1309,7 @@ pub const LibExeObjStep = struct {
}
}
- fn appendCompileFlags(self: &LibExeObjStep, args: &ArrayList([]const u8)) void {
+ fn appendCompileFlags(self: *LibExeObjStep, args: *ArrayList([]const u8)) void {
if (!self.strip) {
args.append("-g") catch unreachable;
}
@@ -1354,7 +1354,7 @@ pub const LibExeObjStep = struct {
}
}
- fn makeC(self: &LibExeObjStep) !void {
+ fn makeC(self: *LibExeObjStep) !void {
const builder = self.builder;
const cc = builder.getCCExe();
@@ -1580,7 +1580,7 @@ pub const LibExeObjStep = struct {
pub const TestStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
root_src: []const u8,
build_mode: builtin.Mode,
verbose: bool,
@@ -1591,7 +1591,7 @@ pub const TestStep = struct {
exec_cmd_args: ?[]const ?[]const u8,
include_dirs: ArrayList([]const u8),
- pub fn init(builder: &Builder, root_src: []const u8) TestStep {
+ pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
return TestStep{
.step = Step.init(step_name, builder.allocator, make),
@@ -1608,31 +1608,31 @@ pub const TestStep = struct {
};
}
- pub fn setVerbose(self: &TestStep, value: bool) void {
+ pub fn setVerbose(self: *TestStep, value: bool) void {
self.verbose = value;
}
- pub fn addIncludeDir(self: &TestStep, path: []const u8) void {
+ pub fn addIncludeDir(self: *TestStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
- pub fn setBuildMode(self: &TestStep, mode: builtin.Mode) void {
+ pub fn setBuildMode(self: *TestStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
- pub fn linkSystemLibrary(self: &TestStep, name: []const u8) void {
+ pub fn linkSystemLibrary(self: *TestStep, name: []const u8) void {
self.link_libs.put(name) catch unreachable;
}
- pub fn setNamePrefix(self: &TestStep, text: []const u8) void {
+ pub fn setNamePrefix(self: *TestStep, text: []const u8) void {
self.name_prefix = text;
}
- pub fn setFilter(self: &TestStep, text: ?[]const u8) void {
+ pub fn setFilter(self: *TestStep, text: ?[]const u8) void {
self.filter = text;
}
- pub fn setTarget(self: &TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
+ pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
.arch = target_arch,
@@ -1642,11 +1642,11 @@ pub const TestStep = struct {
};
}
- pub fn setExecCmd(self: &TestStep, args: []const ?[]const u8) void {
+ pub fn setExecCmd(self: *TestStep, args: []const ?[]const u8) void {
self.exec_cmd_args = args;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(TestStep, "step", step);
const builder = self.builder;
@@ -1739,13 +1739,13 @@ pub const TestStep = struct {
pub const CommandStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
argv: [][]const u8,
cwd: ?[]const u8,
- env_map: &const BufMap,
+ env_map: *const BufMap,
/// ::argv is copied.
- pub fn create(builder: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep {
+ pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
const self = builder.allocator.create(CommandStep) catch unreachable;
self.* = CommandStep{
.builder = builder,
@@ -1759,7 +1759,7 @@ pub const CommandStep = struct {
return self;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(CommandStep, "step", step);
const cwd = if (self.cwd) |cwd| self.builder.pathFromRoot(cwd) else self.builder.build_root;
@@ -1769,13 +1769,13 @@ pub const CommandStep = struct {
const InstallArtifactStep = struct {
step: Step,
- builder: &Builder,
- artifact: &LibExeObjStep,
+ builder: *Builder,
+ artifact: *LibExeObjStep,
dest_file: []const u8,
const Self = this;
- pub fn create(builder: &Builder, artifact: &LibExeObjStep) &Self {
+ pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
const self = builder.allocator.create(Self) catch unreachable;
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
@@ -1797,7 +1797,7 @@ const InstallArtifactStep = struct {
return self;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(Self, "step", step);
const builder = self.builder;
@@ -1818,11 +1818,11 @@ const InstallArtifactStep = struct {
pub const InstallFileStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
src_path: []const u8,
dest_path: []const u8,
- pub fn init(builder: &Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
+ pub fn init(builder: *Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
return InstallFileStep{
.builder = builder,
.step = Step.init(builder.fmt("install {}", src_path), builder.allocator, make),
@@ -1831,7 +1831,7 @@ pub const InstallFileStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(InstallFileStep, "step", step);
try self.builder.copyFile(self.src_path, self.dest_path);
}
@@ -1839,11 +1839,11 @@ pub const InstallFileStep = struct {
pub const WriteFileStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
file_path: []const u8,
data: []const u8,
- pub fn init(builder: &Builder, file_path: []const u8, data: []const u8) WriteFileStep {
+ pub fn init(builder: *Builder, file_path: []const u8, data: []const u8) WriteFileStep {
return WriteFileStep{
.builder = builder,
.step = Step.init(builder.fmt("writefile {}", file_path), builder.allocator, make),
@@ -1852,7 +1852,7 @@ pub const WriteFileStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step);
const full_path = self.builder.pathFromRoot(self.file_path);
const full_path_dir = os.path.dirname(full_path);
@@ -1869,10 +1869,10 @@ pub const WriteFileStep = struct {
pub const LogStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
data: []const u8,
- pub fn init(builder: &Builder, data: []const u8) LogStep {
+ pub fn init(builder: *Builder, data: []const u8) LogStep {
return LogStep{
.builder = builder,
.step = Step.init(builder.fmt("log {}", data), builder.allocator, make),
@@ -1880,7 +1880,7 @@ pub const LogStep = struct {
};
}
- fn make(step: &Step) error!void {
+ fn make(step: *Step) error!void {
const self = @fieldParentPtr(LogStep, "step", step);
warn("{}", self.data);
}
@@ -1888,10 +1888,10 @@ pub const LogStep = struct {
pub const RemoveDirStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
dir_path: []const u8,
- pub fn init(builder: &Builder, dir_path: []const u8) RemoveDirStep {
+ pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{
.builder = builder,
.step = Step.init(builder.fmt("RemoveDir {}", dir_path), builder.allocator, make),
@@ -1899,7 +1899,7 @@ pub const RemoveDirStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(RemoveDirStep, "step", step);
const full_path = self.builder.pathFromRoot(self.dir_path);
@@ -1912,39 +1912,39 @@ pub const RemoveDirStep = struct {
pub const Step = struct {
name: []const u8,
- makeFn: fn (self: &Step) error!void,
- dependencies: ArrayList(&Step),
+ makeFn: fn (self: *Step) error!void,
+ dependencies: ArrayList(*Step),
loop_flag: bool,
done_flag: bool,
- pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn (&Step) error!void) Step {
+ pub fn init(name: []const u8, allocator: *Allocator, makeFn: fn (*Step) error!void) Step {
return Step{
.name = name,
.makeFn = makeFn,
- .dependencies = ArrayList(&Step).init(allocator),
+ .dependencies = ArrayList(*Step).init(allocator),
.loop_flag = false,
.done_flag = false,
};
}
- pub fn initNoOp(name: []const u8, allocator: &Allocator) Step {
+ pub fn initNoOp(name: []const u8, allocator: *Allocator) Step {
return init(name, allocator, makeNoOp);
}
- pub fn make(self: &Step) !void {
+ pub fn make(self: *Step) !void {
if (self.done_flag) return;
try self.makeFn(self);
self.done_flag = true;
}
- pub fn dependOn(self: &Step, other: &Step) void {
+ pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch unreachable;
}
- fn makeNoOp(self: &Step) error!void {}
+ fn makeNoOp(self: *Step) error!void {}
};
-fn doAtomicSymLinks(allocator: &Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = os.path.dirname(output_path);
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
diff --git a/std/c/darwin.zig b/std/c/darwin.zig
index 6a33c994bf..69395e6b27 100644
--- a/std/c/darwin.zig
+++ b/std/c/darwin.zig
@@ -1,10 +1,10 @@
-extern "c" fn __error() &c_int;
-pub extern "c" fn _NSGetExecutablePath(buf: &u8, bufsize: &u32) c_int;
+extern "c" fn __error() *c_int;
+pub extern "c" fn _NSGetExecutablePath(buf: *u8, bufsize: *u32) c_int;
-pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: &u8, buf_len: usize, basep: &i64) usize;
+pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: *u8, buf_len: usize, basep: *i64) usize;
pub extern "c" fn mach_absolute_time() u64;
-pub extern "c" fn mach_timebase_info(tinfo: ?&mach_timebase_info_data) void;
+pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
pub use @import("../os/darwin_errno.zig");
diff --git a/std/c/index.zig b/std/c/index.zig
index f9704f4738..114b79cdae 100644
--- a/std/c/index.zig
+++ b/std/c/index.zig
@@ -13,49 +13,49 @@ pub extern "c" fn abort() noreturn;
pub extern "c" fn exit(code: c_int) noreturn;
pub extern "c" fn isatty(fd: c_int) c_int;
pub extern "c" fn close(fd: c_int) c_int;
-pub extern "c" fn fstat(fd: c_int, buf: &Stat) c_int;
-pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: &Stat) c_int;
+pub extern "c" fn fstat(fd: c_int, buf: *Stat) c_int;
+pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
-pub extern "c" fn open(path: &const u8, oflag: c_int, ...) c_int;
+pub extern "c" fn open(path: *const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
-pub extern "c" fn read(fd: c_int, buf: &c_void, nbyte: usize) isize;
-pub extern "c" fn stat(noalias path: &const u8, noalias buf: &Stat) c_int;
-pub extern "c" fn write(fd: c_int, buf: &const c_void, nbyte: usize) isize;
-pub extern "c" fn mmap(addr: ?&c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?&c_void;
-pub extern "c" fn munmap(addr: &c_void, len: usize) c_int;
-pub extern "c" fn unlink(path: &const u8) c_int;
-pub extern "c" fn getcwd(buf: &u8, size: usize) ?&u8;
-pub extern "c" fn waitpid(pid: c_int, stat_loc: &c_int, options: c_int) c_int;
+pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
+pub extern "c" fn stat(noalias path: *const u8, noalias buf: *Stat) c_int;
+pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
+pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
+pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
+pub extern "c" fn unlink(path: *const u8) c_int;
+pub extern "c" fn getcwd(buf: *u8, size: usize) ?*u8;
+pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int;
pub extern "c" fn fork() c_int;
-pub extern "c" fn access(path: &const u8, mode: c_uint) c_int;
-pub extern "c" fn pipe(fds: &c_int) c_int;
-pub extern "c" fn mkdir(path: &const u8, mode: c_uint) c_int;
-pub extern "c" fn symlink(existing: &const u8, new: &const u8) c_int;
-pub extern "c" fn rename(old: &const u8, new: &const u8) c_int;
-pub extern "c" fn chdir(path: &const u8) c_int;
-pub extern "c" fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) c_int;
+pub extern "c" fn access(path: *const u8, mode: c_uint) c_int;
+pub extern "c" fn pipe(fds: *c_int) c_int;
+pub extern "c" fn mkdir(path: *const u8, mode: c_uint) c_int;
+pub extern "c" fn symlink(existing: *const u8, new: *const u8) c_int;
+pub extern "c" fn rename(old: *const u8, new: *const u8) c_int;
+pub extern "c" fn chdir(path: *const u8) c_int;
+pub extern "c" fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) c_int;
pub extern "c" fn dup(fd: c_int) c_int;
pub extern "c" fn dup2(old_fd: c_int, new_fd: c_int) c_int;
-pub extern "c" fn readlink(noalias path: &const u8, noalias buf: &u8, bufsize: usize) isize;
-pub extern "c" fn realpath(noalias file_name: &const u8, noalias resolved_name: &u8) ?&u8;
-pub extern "c" fn sigprocmask(how: c_int, noalias set: &const sigset_t, noalias oset: ?&sigset_t) c_int;
-pub extern "c" fn gettimeofday(tv: ?&timeval, tz: ?&timezone) c_int;
-pub extern "c" fn sigaction(sig: c_int, noalias act: &const Sigaction, noalias oact: ?&Sigaction) c_int;
-pub extern "c" fn nanosleep(rqtp: &const timespec, rmtp: ?&timespec) c_int;
+pub extern "c" fn readlink(noalias path: *const u8, noalias buf: *u8, bufsize: usize) isize;
+pub extern "c" fn realpath(noalias file_name: *const u8, noalias resolved_name: *u8) ?*u8;
+pub extern "c" fn sigprocmask(how: c_int, noalias set: *const sigset_t, noalias oset: ?*sigset_t) c_int;
+pub extern "c" fn gettimeofday(tv: ?*timeval, tz: ?*timezone) c_int;
+pub extern "c" fn sigaction(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int;
+pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
-pub extern "c" fn rmdir(path: &const u8) c_int;
+pub extern "c" fn rmdir(path: *const u8) c_int;
-pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?&c_void;
-pub extern "c" fn malloc(usize) ?&c_void;
-pub extern "c" fn realloc(&c_void, usize) ?&c_void;
-pub extern "c" fn free(&c_void) void;
-pub extern "c" fn posix_memalign(memptr: &&c_void, alignment: usize, size: usize) c_int;
+pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
+pub extern "c" fn malloc(usize) ?*c_void;
+pub extern "c" fn realloc(*c_void, usize) ?*c_void;
+pub extern "c" fn free(*c_void) void;
+pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
-pub extern "pthread" fn pthread_create(noalias newthread: &pthread_t, noalias attr: ?&const pthread_attr_t, start_routine: extern fn (?&c_void) ?&c_void, noalias arg: ?&c_void) c_int;
-pub extern "pthread" fn pthread_attr_init(attr: &pthread_attr_t) c_int;
-pub extern "pthread" fn pthread_attr_setstack(attr: &pthread_attr_t, stackaddr: &c_void, stacksize: usize) c_int;
-pub extern "pthread" fn pthread_attr_destroy(attr: &pthread_attr_t) c_int;
-pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?&?&c_void) c_int;
+pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int;
+pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
+pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
-pub const pthread_t = &@OpaqueType();
+pub const pthread_t = *@OpaqueType();
diff --git a/std/c/linux.zig b/std/c/linux.zig
index 7810fec130..0ab043533e 100644
--- a/std/c/linux.zig
+++ b/std/c/linux.zig
@@ -1,7 +1,7 @@
pub use @import("../os/linux/errno.zig");
-pub extern "c" fn getrandom(buf_ptr: &u8, buf_len: usize, flags: c_uint) c_int;
-extern "c" fn __errno_location() &c_int;
+pub extern "c" fn getrandom(buf_ptr: *u8, buf_len: usize, flags: c_uint) c_int;
+extern "c" fn __errno_location() *c_int;
pub const _errno = __errno_location;
pub const pthread_attr_t = extern struct {
diff --git a/std/c/windows.zig b/std/c/windows.zig
index 6e8b17eda8..35ca217131 100644
--- a/std/c/windows.zig
+++ b/std/c/windows.zig
@@ -1 +1 @@
-pub extern "c" fn _errno() &c_int;
+pub extern "c" fn _errno() *c_int;
diff --git a/std/crypto/blake2.zig b/std/crypto/blake2.zig
index bf3193b5d9..f0a9766c00 100644
--- a/std/crypto/blake2.zig
+++ b/std/crypto/blake2.zig
@@ -75,7 +75,7 @@ fn Blake2s(comptime out_len: usize) type {
return s;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
mem.copy(u32, d.h[0..], iv[0..]);
// No key plus default parameters
@@ -90,7 +90,7 @@ fn Blake2s(comptime out_len: usize) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -113,7 +113,7 @@ fn Blake2s(comptime out_len: usize) type {
d.buf_len += u8(b[off..].len);
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= out_len / 8);
mem.set(u8, d.buf[d.buf_len..], 0);
@@ -127,7 +127,7 @@ fn Blake2s(comptime out_len: usize) type {
}
}
- fn round(d: &Self, b: []const u8, last: bool) void {
+ fn round(d: *Self, b: []const u8, last: bool) void {
debug.assert(b.len == 64);
var m: [16]u32 = undefined;
@@ -310,7 +310,7 @@ fn Blake2b(comptime out_len: usize) type {
return s;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
mem.copy(u64, d.h[0..], iv[0..]);
// No key plus default parameters
@@ -325,7 +325,7 @@ fn Blake2b(comptime out_len: usize) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -348,7 +348,7 @@ fn Blake2b(comptime out_len: usize) type {
d.buf_len += u8(b[off..].len);
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
@@ -360,7 +360,7 @@ fn Blake2b(comptime out_len: usize) type {
}
}
- fn round(d: &Self, b: []const u8, last: bool) void {
+ fn round(d: *Self, b: []const u8, last: bool) void {
debug.assert(b.len == 128);
var m: [16]u64 = undefined;
diff --git a/std/crypto/md5.zig b/std/crypto/md5.zig
index 3d05597273..c0d1732d37 100644
--- a/std/crypto/md5.zig
+++ b/std/crypto/md5.zig
@@ -44,7 +44,7 @@ pub const Md5 = struct {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@@ -59,7 +59,7 @@ pub const Md5 = struct {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -84,7 +84,7 @@ pub const Md5 = struct {
d.total_len +%= b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 16);
// The buffer here will never be completely full.
@@ -116,7 +116,7 @@ pub const Md5 = struct {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;
diff --git a/std/crypto/sha1.zig b/std/crypto/sha1.zig
index e9d8e3e132..9e46fc9239 100644
--- a/std/crypto/sha1.zig
+++ b/std/crypto/sha1.zig
@@ -43,7 +43,7 @@ pub const Sha1 = struct {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@@ -59,7 +59,7 @@ pub const Sha1 = struct {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -83,7 +83,7 @@ pub const Sha1 = struct {
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 20);
// The buffer here will never be completely full.
@@ -115,7 +115,7 @@ pub const Sha1 = struct {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;
diff --git a/std/crypto/sha2.zig b/std/crypto/sha2.zig
index aedc820f44..d1375d73e8 100644
--- a/std/crypto/sha2.zig
+++ b/std/crypto/sha2.zig
@@ -93,7 +93,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = params.iv0;
d.s[1] = params.iv1;
d.s[2] = params.iv2;
@@ -112,7 +112,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -136,7 +136,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= params.out_len / 8);
// The buffer here will never be completely full.
@@ -171,7 +171,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [64]u32 = undefined;
@@ -434,7 +434,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = params.iv0;
d.s[1] = params.iv1;
d.s[2] = params.iv2;
@@ -453,7 +453,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -477,7 +477,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= params.out_len / 8);
// The buffer here will never be completely full.
@@ -512,7 +512,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 128);
var s: [80]u64 = undefined;
diff --git a/std/crypto/sha3.zig b/std/crypto/sha3.zig
index 75bec57a87..ae02d7a482 100644
--- a/std/crypto/sha3.zig
+++ b/std/crypto/sha3.zig
@@ -26,7 +26,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
mem.set(u8, d.s[0..], 0);
d.offset = 0;
d.rate = 200 - (bits / 4);
@@ -38,7 +38,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var ip: usize = 0;
var len = b.len;
var rate = d.rate - d.offset;
@@ -63,7 +63,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
d.offset = offset + len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
// padding
d.s[d.offset] ^= delim;
d.s[d.rate - 1] ^= 0x80;
diff --git a/std/crypto/throughput_test.zig b/std/crypto/throughput_test.zig
index c5c4f9fe10..0ad6845d1a 100644
--- a/std/crypto/throughput_test.zig
+++ b/std/crypto/throughput_test.zig
@@ -15,8 +15,8 @@ const BytesToHash = 1024 * MiB;
pub fn main() !void {
var stdout_file = try std.io.getStdOut();
- var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
- const stdout = &stdout_out_stream.stream;
+ var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
+ const stdout = &stdout_out_stream.stream;
var block: [HashFunction.block_size]u8 = undefined;
std.mem.set(u8, block[0..], 0);
diff --git a/std/cstr.zig b/std/cstr.zig
index c9f3026064..dfbfb8047f 100644
--- a/std/cstr.zig
+++ b/std/cstr.zig
@@ -9,13 +9,13 @@ pub const line_sep = switch (builtin.os) {
else => "\n",
};
-pub fn len(ptr: &const u8) usize {
+pub fn len(ptr: *const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
}
-pub fn cmp(a: &const u8, b: &const u8) i8 {
+pub fn cmp(a: *const u8, b: *const u8) i8 {
var index: usize = 0;
while (a[index] == b[index] and a[index] != 0) : (index += 1) {}
if (a[index] > b[index]) {
@@ -27,11 +27,11 @@ pub fn cmp(a: &const u8, b: &const u8) i8 {
}
}
-pub fn toSliceConst(str: &const u8) []const u8 {
+pub fn toSliceConst(str: *const u8) []const u8 {
return str[0..len(str)];
}
-pub fn toSlice(str: &u8) []u8 {
+pub fn toSlice(str: *u8) []u8 {
return str[0..len(str)];
}
@@ -47,7 +47,7 @@ fn testCStrFnsImpl() void {
/// Returns a mutable slice with 1 more byte of length which is a null byte.
/// Caller owns the returned memory.
-pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
+pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@@ -55,13 +55,13 @@ pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
}
pub const NullTerminated2DArray = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
byte_count: usize,
- ptr: ?&?&u8,
+ ptr: ?*?*u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
- pub fn fromSlices(allocator: &mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+ pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
@@ -75,11 +75,11 @@ pub const NullTerminated2DArray = struct {
const index_size = @sizeOf(usize) * new_len; // size of the ptrs
byte_count += index_size;
- const buf = try allocator.alignedAlloc(u8, @alignOf(?&u8), byte_count);
+ const buf = try allocator.alignedAlloc(u8, @alignOf(?*u8), byte_count);
errdefer allocator.free(buf);
var write_index = index_size;
- const index_buf = ([]?&u8)(buf);
+ const index_buf = ([]?*u8)(buf);
var i: usize = 0;
for (slices) |slice| {
@@ -97,12 +97,12 @@ pub const NullTerminated2DArray = struct {
return NullTerminated2DArray{
.allocator = allocator,
.byte_count = byte_count,
- .ptr = @ptrCast(?&?&u8, buf.ptr),
+ .ptr = @ptrCast(?*?*u8, buf.ptr),
};
}
- pub fn deinit(self: &NullTerminated2DArray) void {
- const buf = @ptrCast(&u8, self.ptr);
+ pub fn deinit(self: *NullTerminated2DArray) void {
+ const buf = @ptrCast(*u8, self.ptr);
self.allocator.free(buf[0..self.byte_count]);
}
};
diff --git a/std/debug/failing_allocator.zig b/std/debug/failing_allocator.zig
index 6b5edff5bf..e16dd21db4 100644
--- a/std/debug/failing_allocator.zig
+++ b/std/debug/failing_allocator.zig
@@ -7,12 +7,12 @@ pub const FailingAllocator = struct {
allocator: mem.Allocator,
index: usize,
fail_index: usize,
- internal_allocator: &mem.Allocator,
+ internal_allocator: *mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
deallocations: usize,
- pub fn init(allocator: &mem.Allocator, fail_index: usize) FailingAllocator {
+ pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
return FailingAllocator{
.internal_allocator = allocator,
.fail_index = fail_index,
@@ -28,7 +28,7 @@ pub const FailingAllocator = struct {
};
}
- fn alloc(allocator: &mem.Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *mem.Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
@@ -39,7 +39,7 @@ pub const FailingAllocator = struct {
return result;
}
- fn realloc(allocator: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (new_size <= old_mem.len) {
self.freed_bytes += old_mem.len - new_size;
@@ -55,7 +55,7 @@ pub const FailingAllocator = struct {
return result;
}
- fn free(allocator: &mem.Allocator, bytes: []u8) void {
+ fn free(allocator: *mem.Allocator, bytes: []u8) void {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
self.freed_bytes += bytes.len;
self.deallocations += 1;
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 92e565b391..00d9bef121 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -16,12 +16,12 @@ pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator;
/// TODO atomic/multithread support
var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined;
-var stderr_stream: ?&io.OutStream(io.FileOutStream.Error) = null;
+var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
pub fn warn(comptime fmt: []const u8, args: ...) void {
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
-fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
+fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
} else {
@@ -33,8 +33,8 @@ fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
}
}
-var self_debug_info: ?&ElfStackTrace = null;
-pub fn getSelfDebugInfo() !&ElfStackTrace {
+var self_debug_info: ?*ElfStackTrace = null;
+pub fn getSelfDebugInfo() !*ElfStackTrace {
if (self_debug_info) |info| {
return info;
} else {
@@ -58,7 +58,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
}
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
-pub fn dumpStackTrace(stack_trace: &const builtin.StackTrace) void {
+pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void {
const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", @errorName(err)) catch return;
@@ -104,7 +104,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
var panicking: u8 = 0; // TODO make this a bool
-pub fn panicExtra(trace: ?&const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
+pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
@setCold(true);
if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) {
@@ -130,7 +130,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
-pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool) !void {
+pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void {
var frame_index: usize = undefined;
var frames_left: usize = undefined;
if (stack_trace.index < stack_trace.instruction_addresses.len) {
@@ -150,7 +150,7 @@ pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var,
}
}
-pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
+pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) {
NotLookingForStartAddress,
LookingForStartAddress: usize,
@@ -166,8 +166,8 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_
}
var fp = @ptrToInt(@frameAddress());
- while (fp != 0) : (fp = @intToPtr(&const usize, fp).*) {
- const return_address = @intToPtr(&const usize, fp + @sizeOf(usize)).*;
+ while (fp != 0) : (fp = @intToPtr(*const usize, fp).*) {
+ const return_address = @intToPtr(*const usize, fp + @sizeOf(usize)).*;
switch (addr_state) {
AddressState.NotLookingForStartAddress => {},
@@ -183,7 +183,7 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_
}
}
-fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: usize) !void {
+fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize) !void {
const ptr_hex = "0x{x}";
switch (builtin.os) {
@@ -236,7 +236,7 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: us
}
}
-pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
+pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
const st = try allocator.create(ElfStackTrace);
@@ -289,7 +289,7 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
}
}
-fn printLineFromFile(allocator: &mem.Allocator, out_stream: var, line_info: &const LineInfo) !void {
+fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void {
var f = try os.File.openRead(allocator, line_info.file_name);
defer f.close();
// TODO fstat and make sure that the file has the correct size
@@ -325,32 +325,32 @@ pub const ElfStackTrace = switch (builtin.os) {
builtin.Os.macosx => struct {
symbol_table: macho.SymbolTable,
- pub fn close(self: &ElfStackTrace) void {
+ pub fn close(self: *ElfStackTrace) void {
self.symbol_table.deinit();
}
},
else => struct {
self_exe_file: os.File,
elf: elf.Elf,
- debug_info: &elf.SectionHeader,
- debug_abbrev: &elf.SectionHeader,
- debug_str: &elf.SectionHeader,
- debug_line: &elf.SectionHeader,
- debug_ranges: ?&elf.SectionHeader,
+ debug_info: *elf.SectionHeader,
+ debug_abbrev: *elf.SectionHeader,
+ debug_str: *elf.SectionHeader,
+ debug_line: *elf.SectionHeader,
+ debug_ranges: ?*elf.SectionHeader,
abbrev_table_list: ArrayList(AbbrevTableHeader),
compile_unit_list: ArrayList(CompileUnit),
- pub fn allocator(self: &const ElfStackTrace) &mem.Allocator {
+ pub fn allocator(self: *const ElfStackTrace) *mem.Allocator {
return self.abbrev_table_list.allocator;
}
- pub fn readString(self: &ElfStackTrace) ![]u8 {
+ pub fn readString(self: *ElfStackTrace) ![]u8 {
var in_file_stream = io.FileInStream.init(&self.self_exe_file);
const in_stream = &in_file_stream.stream;
return readStringRaw(self.allocator(), in_stream);
}
- pub fn close(self: &ElfStackTrace) void {
+ pub fn close(self: *ElfStackTrace) void {
self.self_exe_file.close();
self.elf.close();
}
@@ -365,7 +365,7 @@ const PcRange = struct {
const CompileUnit = struct {
version: u16,
is_64: bool,
- die: &Die,
+ die: *Die,
index: usize,
pc_range: ?PcRange,
};
@@ -408,7 +408,7 @@ const Constant = struct {
payload: []u8,
signed: bool,
- fn asUnsignedLe(self: &const Constant) !u64 {
+ fn asUnsignedLe(self: *const Constant) !u64 {
if (self.payload.len > @sizeOf(u64)) return error.InvalidDebugInfo;
if (self.signed) return error.InvalidDebugInfo;
return mem.readInt(self.payload, u64, builtin.Endian.Little);
@@ -425,14 +425,14 @@ const Die = struct {
value: FormValue,
};
- fn getAttr(self: &const Die, id: u64) ?&const FormValue {
+ fn getAttr(self: *const Die, id: u64) ?*const FormValue {
for (self.attrs.toSliceConst()) |*attr| {
if (attr.id == id) return &attr.value;
}
return null;
}
- fn getAttrAddr(self: &const Die, id: u64) !u64 {
+ fn getAttrAddr(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
@@ -440,7 +440,7 @@ const Die = struct {
};
}
- fn getAttrSecOffset(self: &const Die, id: u64) !u64 {
+ fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
@@ -449,7 +449,7 @@ const Die = struct {
};
}
- fn getAttrUnsignedLe(self: &const Die, id: u64) !u64 {
+ fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
@@ -457,7 +457,7 @@ const Die = struct {
};
}
- fn getAttrString(self: &const Die, st: &ElfStackTrace, id: u64) ![]u8 {
+ fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
@@ -478,9 +478,9 @@ const LineInfo = struct {
line: usize,
column: usize,
file_name: []u8,
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
- fn deinit(self: &const LineInfo) void {
+ fn deinit(self: *const LineInfo) void {
self.allocator.free(self.file_name);
}
};
@@ -496,7 +496,7 @@ const LineNumberProgram = struct {
target_address: usize,
include_dirs: []const []const u8,
- file_entries: &ArrayList(FileEntry),
+ file_entries: *ArrayList(FileEntry),
prev_address: usize,
prev_file: usize,
@@ -506,7 +506,7 @@ const LineNumberProgram = struct {
prev_basic_block: bool,
prev_end_sequence: bool,
- pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: &ArrayList(FileEntry), target_address: usize) LineNumberProgram {
+ pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), target_address: usize) LineNumberProgram {
return LineNumberProgram{
.address = 0,
.file = 1,
@@ -528,7 +528,7 @@ const LineNumberProgram = struct {
};
}
- pub fn checkLineMatch(self: &LineNumberProgram) !?LineInfo {
+ pub fn checkLineMatch(self: *LineNumberProgram) !?LineInfo {
if (self.target_address >= self.prev_address and self.target_address < self.address) {
const file_entry = if (self.prev_file == 0) {
return error.MissingDebugInfo;
@@ -562,7 +562,7 @@ const LineNumberProgram = struct {
}
};
-fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
+fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
var buf = ArrayList(u8).init(allocator);
while (true) {
const byte = try in_stream.readByte();
@@ -572,30 +572,30 @@ fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
return buf.toSlice();
}
-fn getString(st: &ElfStackTrace, offset: u64) ![]u8 {
+fn getString(st: *ElfStackTrace, offset: u64) ![]u8 {
const pos = st.debug_str.offset + offset;
try st.self_exe_file.seekTo(pos);
return st.readString();
}
-fn readAllocBytes(allocator: &mem.Allocator, in_stream: var, size: usize) ![]u8 {
+fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try in_stream.read(buf)) < size) return error.EndOfFile;
return buf;
}
-fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
-fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const block_len = try in_stream.readVarInt(builtin.Endian.Little, usize, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
+fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
return FormValue{
.Const = Constant{
.signed = signed,
@@ -612,12 +612,12 @@ fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32)) else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64) else unreachable;
}
-fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueRefLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Ref = buf };
}
-fn parseFormValueRef(allocator: &mem.Allocator, in_stream: var, comptime T: type) !FormValue {
+fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type) !FormValue {
const block_len = try in_stream.readIntLe(T);
return parseFormValueRefLen(allocator, in_stream, block_len);
}
@@ -632,7 +632,7 @@ const ParseFormValueError = error{
OutOfMemory,
};
-fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
+fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) {
DW.FORM_addr => FormValue{ .Address = try parseFormValueTargetAddrSize(in_stream) },
DW.FORM_block1 => parseFormValueBlock(allocator, in_stream, 1),
@@ -682,7 +682,7 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64
};
}
-fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
+fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@@ -712,7 +712,7 @@ fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
/// seeks in the stream and parses it.
-fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
+fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable {
for (st.abbrev_table_list.toSlice()) |*header| {
if (header.offset == abbrev_offset) {
return &header.table;
@@ -726,14 +726,14 @@ fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
return &st.abbrev_table_list.items[st.abbrev_table_list.len - 1].table;
}
-fn getAbbrevTableEntry(abbrev_table: &const AbbrevTable, abbrev_code: u64) ?&const AbbrevTableEntry {
+fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*const AbbrevTableEntry {
for (abbrev_table.toSliceConst()) |*table_entry| {
if (table_entry.abbrev_code == abbrev_code) return table_entry;
}
return null;
}
-fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !Die {
+fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@@ -755,7 +755,7 @@ fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !
return result;
}
-fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, target_address: usize) !LineInfo {
+fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir);
const in_file = &st.self_exe_file;
@@ -934,7 +934,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
return error.MissingDebugInfo;
}
-fn scanAllCompileUnits(st: &ElfStackTrace) !void {
+fn scanAllCompileUnits(st: *ElfStackTrace) !void {
const debug_info_end = st.debug_info.offset + st.debug_info.size;
var this_unit_offset = st.debug_info.offset;
var cu_index: usize = 0;
@@ -1005,7 +1005,7 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
}
}
-fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit {
+fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit {
var in_file_stream = io.FileInStream.init(&st.self_exe_file);
const in_stream = &in_file_stream.stream;
for (st.compile_unit_list.toSlice()) |*compile_unit| {
@@ -1039,7 +1039,7 @@ fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit
return error.MissingDebugInfo;
}
-fn readInitialLength(comptime E: type, in_stream: &io.InStream(E), is_64: &bool) !u64 {
+fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@@ -1096,10 +1096,10 @@ var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator
var global_allocator_mem: [100 * 1024]u8 = undefined;
// TODO make thread safe
-var debug_info_allocator: ?&mem.Allocator = null;
+var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
-fn getDebugInfoAllocator() &mem.Allocator {
+fn getDebugInfoAllocator() *mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_direct_allocator = std.heap.DirectAllocator.init();
diff --git a/std/elf.zig b/std/elf.zig
index 29b9473f98..50e97ab271 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -338,7 +338,7 @@ pub const SectionHeader = struct {
};
pub const Elf = struct {
- in_file: &os.File,
+ in_file: *os.File,
auto_close_stream: bool,
is_64: bool,
endian: builtin.Endian,
@@ -348,20 +348,20 @@ pub const Elf = struct {
program_header_offset: u64,
section_header_offset: u64,
string_section_index: u64,
- string_section: &SectionHeader,
+ string_section: *SectionHeader,
section_headers: []SectionHeader,
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
prealloc_file: os.File,
/// Call close when done.
- pub fn openPath(elf: &Elf, allocator: &mem.Allocator, path: []const u8) !void {
+ pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void {
try elf.prealloc_file.open(path);
- try elf.openFile(allocator, &elf.prealloc_file);
+ try elf.openFile(allocator, &elf.prealloc_file);
elf.auto_close_stream = true;
}
/// Call close when done.
- pub fn openFile(elf: &Elf, allocator: &mem.Allocator, file: &os.File) !void {
+ pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: *os.File) !void {
elf.allocator = allocator;
elf.in_file = file;
elf.auto_close_stream = false;
@@ -503,13 +503,13 @@ pub const Elf = struct {
}
}
- pub fn close(elf: &Elf) void {
+ pub fn close(elf: *Elf) void {
elf.allocator.free(elf.section_headers);
if (elf.auto_close_stream) elf.in_file.close();
}
- pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader {
+ pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader {
var file_stream = io.FileInStream.init(elf.in_file);
const in = &file_stream.stream;
@@ -533,7 +533,7 @@ pub const Elf = struct {
return null;
}
- pub fn seekToSection(elf: &Elf, elf_section: &SectionHeader) !void {
+ pub fn seekToSection(elf: *Elf, elf_section: *SectionHeader) !void {
try elf.in_file.seekTo(elf_section.offset);
}
};
diff --git a/std/event.zig b/std/event.zig
index 4604eb8d02..89ab816bb6 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -6,9 +6,9 @@ const mem = std.mem;
const posix = std.os.posix;
pub const TcpServer = struct {
- handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void,
+ handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
- loop: &Loop,
+ loop: *Loop,
sockfd: i32,
accept_coro: ?promise,
listen_address: std.net.Address,
@@ -17,7 +17,7 @@ pub const TcpServer = struct {
const PromiseNode = std.LinkedList(promise).Node;
- pub fn init(loop: &Loop) !TcpServer {
+ pub fn init(loop: *Loop) !TcpServer {
const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
errdefer std.os.close(sockfd);
@@ -32,7 +32,7 @@ pub const TcpServer = struct {
};
}
- pub fn listen(self: &TcpServer, address: &const std.net.Address, handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void) !void {
+ pub fn listen(self: *TcpServer, address: *const std.net.Address, handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void) !void {
self.handleRequestFn = handleRequestFn;
try std.os.posixBind(self.sockfd, &address.os_addr);
@@ -46,13 +46,13 @@ pub const TcpServer = struct {
errdefer self.loop.removeFd(self.sockfd);
}
- pub fn deinit(self: &TcpServer) void {
+ pub fn deinit(self: *TcpServer) void {
self.loop.removeFd(self.sockfd);
if (self.accept_coro) |accept_coro| cancel accept_coro;
std.os.close(self.sockfd);
}
- pub async fn handler(self: &TcpServer) void {
+ pub async fn handler(self: *TcpServer) void {
while (true) {
var accepted_addr: std.net.Address = undefined;
if (std.os.posixAccept(self.sockfd, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| {
@@ -92,11 +92,11 @@ pub const TcpServer = struct {
};
pub const Loop = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
epollfd: i32,
keep_running: bool,
- fn init(allocator: &mem.Allocator) !Loop {
+ fn init(allocator: *mem.Allocator) !Loop {
const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
return Loop{
.keep_running = true,
@@ -105,7 +105,7 @@ pub const Loop = struct {
};
}
- pub fn addFd(self: &Loop, fd: i32, prom: promise) !void {
+ pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
var ev = std.os.linux.epoll_event{
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) },
@@ -113,23 +113,23 @@ pub const Loop = struct {
try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
}
- pub fn removeFd(self: &Loop, fd: i32) void {
+ pub fn removeFd(self: *Loop, fd: i32) void {
std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
}
- async fn waitFd(self: &Loop, fd: i32) !void {
+ async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
suspend |p| {
try self.addFd(fd, p);
}
}
- pub fn stop(self: &Loop) void {
+ pub fn stop(self: *Loop) void {
// TODO make atomic
self.keep_running = false;
// TODO activate an fd in the epoll set
}
- pub fn run(self: &Loop) void {
+ pub fn run(self: *Loop) void {
while (self.keep_running) {
var events: [16]std.os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
@@ -141,7 +141,7 @@ pub const Loop = struct {
}
};
-pub async fn connect(loop: &Loop, _address: &const std.net.Address) !std.os.File {
+pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
@@ -163,7 +163,7 @@ test "listen on a port, send bytes, receive bytes" {
tcp_server: TcpServer,
const Self = this;
- async<&mem.Allocator> fn handler(tcp_server: &TcpServer, _addr: &const std.net.Address, _socket: &const std.os.File) void {
+ async<*mem.Allocator> fn handler(tcp_server: *TcpServer, _addr: *const std.net.Address, _socket: *const std.os.File) void {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
defer socket.close();
@@ -177,7 +177,7 @@ test "listen on a port, send bytes, receive bytes" {
cancel p;
}
}
- async fn errorableHandler(self: &Self, _addr: &const std.net.Address, _socket: &const std.os.File) !void {
+ async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: *const std.os.File) !void {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/733
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
@@ -199,7 +199,7 @@ test "listen on a port, send bytes, receive bytes" {
defer cancel p;
loop.run();
}
-async fn doAsyncTest(loop: &Loop, address: &const std.net.Address) void {
+async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
errdefer @panic("test failure");
var socket_file = try await try async event.connect(loop, address);
diff --git a/std/fmt/errol/index.zig b/std/fmt/errol/index.zig
index 65e8d448a8..933958ac18 100644
--- a/std/fmt/errol/index.zig
+++ b/std/fmt/errol/index.zig
@@ -21,7 +21,7 @@ pub const RoundMode = enum {
/// Round a FloatDecimal as returned by errol3 to the specified fractional precision.
/// All digits after the specified precision should be considered invalid.
-pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: RoundMode) void {
+pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: RoundMode) void {
// The round digit refers to the index which we should look at to determine
// whether we need to round to match the specified precision.
var round_digit: usize = 0;
@@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: Ro
float_decimal.exp += 1;
// Re-size the buffer to use the reserved leading byte.
- const one_before = @intToPtr(&u8, @ptrToInt(&float_decimal.digits[0]) - 1);
+ const one_before = @intToPtr(*u8, @ptrToInt(&float_decimal.digits[0]) - 1);
float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
float_decimal.digits[0] = '1';
return;
@@ -217,7 +217,7 @@ fn tableLowerBound(k: u64) usize {
/// @in: The HP number.
/// @val: The double.
/// &returns: The HP number.
-fn hpProd(in: &const HP, val: f64) HP {
+fn hpProd(in: *const HP, val: f64) HP {
var hi: f64 = undefined;
var lo: f64 = undefined;
split(in.val, &hi, &lo);
@@ -239,7 +239,7 @@ fn hpProd(in: &const HP, val: f64) HP {
/// @val: The double.
/// @hi: The high bits.
/// @lo: The low bits.
-fn split(val: f64, hi: &f64, lo: &f64) void {
+fn split(val: f64, hi: *f64, lo: *f64) void {
hi.* = gethi(val);
lo.* = val - hi.*;
}
@@ -252,7 +252,7 @@ fn gethi(in: f64) f64 {
/// Normalize the number by factoring in the error.
/// @hp: The float pair.
-fn hpNormalize(hp: &HP) void {
+fn hpNormalize(hp: *HP) void {
// Required to avoid segfaults causing buffer overrun during errol3 digit output termination.
@setFloatMode(this, @import("builtin").FloatMode.Strict);
@@ -264,7 +264,7 @@ fn hpNormalize(hp: &HP) void {
/// Divide the high-precision number by ten.
/// @hp: The high-precision number
-fn hpDiv10(hp: &HP) void {
+fn hpDiv10(hp: *HP) void {
var val = hp.val;
hp.val /= 10.0;
@@ -280,7 +280,7 @@ fn hpDiv10(hp: &HP) void {
/// Multiply the high-precision number by ten.
/// @hp: The high-precision number
-fn hpMul10(hp: &HP) void {
+fn hpMul10(hp: *HP) void {
const val = hp.val;
hp.val *= 10.0;
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index 0ffbc59895..b522d9d37d 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -679,7 +679,7 @@ const FormatIntBuf = struct {
out_buf: []u8,
index: usize,
};
-fn formatIntCallback(context: &FormatIntBuf, bytes: []const u8) (error{}!void) {
+fn formatIntCallback(context: *FormatIntBuf, bytes: []const u8) (error{}!void) {
mem.copy(u8, context.out_buf[context.index..], bytes);
context.index += bytes.len;
}
@@ -751,7 +751,7 @@ const BufPrintContext = struct {
remaining: []u8,
};
-fn bufPrintWrite(context: &BufPrintContext, bytes: []const u8) !void {
+fn bufPrintWrite(context: *BufPrintContext, bytes: []const u8) !void {
if (context.remaining.len < bytes.len) return error.BufferTooSmall;
mem.copy(u8, context.remaining, bytes);
context.remaining = context.remaining[bytes.len..];
@@ -763,14 +763,14 @@ pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 {
return buf[0 .. buf.len - context.remaining.len];
}
-pub fn allocPrint(allocator: &mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
+pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
var size: usize = 0;
format(&size, error{}, countSize, fmt, args) catch |err| switch (err) {};
const buf = try allocator.alloc(u8, size);
return bufPrint(buf, fmt, args);
}
-fn countSize(size: &usize, bytes: []const u8) (error{}!void) {
+fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
size.* += bytes.len;
}
diff --git a/std/hash/adler.zig b/std/hash/adler.zig
index 12dab1457c..9c5966f89b 100644
--- a/std/hash/adler.zig
+++ b/std/hash/adler.zig
@@ -18,7 +18,7 @@ pub const Adler32 = struct {
// This fast variant is taken from zlib. It reduces the required modulos and unrolls longer
// buffer inputs and should be much quicker.
- pub fn update(self: &Adler32, input: []const u8) void {
+ pub fn update(self: *Adler32, input: []const u8) void {
var s1 = self.adler & 0xffff;
var s2 = (self.adler >> 16) & 0xffff;
@@ -77,7 +77,7 @@ pub const Adler32 = struct {
self.adler = s1 | (s2 << 16);
}
- pub fn final(self: &Adler32) u32 {
+ pub fn final(self: *Adler32) u32 {
return self.adler;
}
diff --git a/std/hash/crc.zig b/std/hash/crc.zig
index 45bcb70e8b..ec831cdc2e 100644
--- a/std/hash/crc.zig
+++ b/std/hash/crc.zig
@@ -58,7 +58,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
return Self{ .crc = 0xffffffff };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
var i: usize = 0;
while (i + 8 <= input.len) : (i += 8) {
const p = input[i .. i + 8];
@@ -86,7 +86,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
}
}
- pub fn final(self: &Self) u32 {
+ pub fn final(self: *Self) u32 {
return ~self.crc;
}
@@ -143,14 +143,14 @@ pub fn Crc32SmallWithPoly(comptime poly: u32) type {
return Self{ .crc = 0xffffffff };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4);
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4);
}
}
- pub fn final(self: &Self) u32 {
+ pub fn final(self: *Self) u32 {
return ~self.crc;
}
diff --git a/std/hash/fnv.zig b/std/hash/fnv.zig
index c2439e0ebc..447c996772 100644
--- a/std/hash/fnv.zig
+++ b/std/hash/fnv.zig
@@ -21,14 +21,14 @@ fn Fnv1a(comptime T: type, comptime prime: T, comptime offset: T) type {
return Self{ .value = offset };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.value ^= b;
self.value *%= prime;
}
}
- pub fn final(self: &Self) T {
+ pub fn final(self: *Self) T {
return self.value;
}
diff --git a/std/hash/siphash.zig b/std/hash/siphash.zig
index 750e23d4c8..8a90308a46 100644
--- a/std/hash/siphash.zig
+++ b/std/hash/siphash.zig
@@ -63,7 +63,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return d;
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial from previous.
@@ -85,7 +85,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.msg_len +%= @truncate(u8, b.len);
}
- pub fn final(d: &Self) T {
+ pub fn final(d: *Self) T {
// Padding
mem.set(u8, d.buf[d.buf_len..], 0);
d.buf[7] = d.msg_len;
@@ -118,7 +118,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return (u128(b2) << 64) | b1;
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 8);
const m = mem.readInt(b[0..], u64, Endian.Little);
@@ -132,7 +132,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.v0 ^= m;
}
- fn sipRound(d: &Self) void {
+ fn sipRound(d: *Self) void {
d.v0 +%= d.v1;
d.v1 = math.rotl(u64, d.v1, u64(13));
d.v1 ^= d.v0;
diff --git a/std/hash_map.zig b/std/hash_map.zig
index f51b9c66ba..a323cdc197 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -14,7 +14,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
- allocator: &Allocator,
+ allocator: *Allocator,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
@@ -28,7 +28,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
pub const Iterator = struct {
- hm: &const Self,
+ hm: *const Self,
// how many items have we returned
count: usize,
// iterator through the entry array
@@ -36,7 +36,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
// used to detect concurrent modification
initial_modification_count: debug_u32,
- pub fn next(it: &Iterator) ?&Entry {
+ pub fn next(it: *Iterator) ?*Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
@@ -53,7 +53,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
// Reset the iterator to the initial index
- pub fn reset(it: &Iterator) void {
+ pub fn reset(it: *Iterator) void {
it.count = 0;
it.index = 0;
// Resetting the modification count too
@@ -61,7 +61,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
};
- pub fn init(allocator: &Allocator) Self {
+ pub fn init(allocator: *Allocator) Self {
return Self{
.entries = []Entry{},
.allocator = allocator,
@@ -71,11 +71,11 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
- pub fn deinit(hm: &const Self) void {
+ pub fn deinit(hm: *const Self) void {
hm.allocator.free(hm.entries);
}
- pub fn clear(hm: &Self) void {
+ pub fn clear(hm: *Self) void {
for (hm.entries) |*entry| {
entry.used = false;
}
@@ -84,12 +84,12 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
hm.incrementModificationCount();
}
- pub fn count(hm: &const Self) usize {
+ pub fn count(hm: *const Self) usize {
return hm.size;
}
/// Returns the value that was already there.
- pub fn put(hm: &Self, key: K, value: &const V) !?V {
+ pub fn put(hm: *Self, key: K, value: *const V) !?V {
if (hm.entries.len == 0) {
try hm.initCapacity(16);
}
@@ -111,18 +111,18 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return hm.internalPut(key, value);
}
- pub fn get(hm: &const Self, key: K) ?&Entry {
+ pub fn get(hm: *const Self, key: K) ?*Entry {
if (hm.entries.len == 0) {
return null;
}
return hm.internalGet(key);
}
- pub fn contains(hm: &const Self, key: K) bool {
+ pub fn contains(hm: *const Self, key: K) bool {
return hm.get(key) != null;
}
- pub fn remove(hm: &Self, key: K) ?&Entry {
+ pub fn remove(hm: *Self, key: K) ?*Entry {
if (hm.entries.len == 0) return null;
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
@@ -154,7 +154,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return null;
}
- pub fn iterator(hm: &const Self) Iterator {
+ pub fn iterator(hm: *const Self) Iterator {
return Iterator{
.hm = hm,
.count = 0,
@@ -163,7 +163,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
- fn initCapacity(hm: &Self, capacity: usize) !void {
+ fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
@@ -172,14 +172,14 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
}
- fn incrementModificationCount(hm: &Self) void {
+ fn incrementModificationCount(hm: *Self) void {
if (want_modification_safety) {
hm.modification_count +%= 1;
}
}
/// Returns the value that was already there.
- fn internalPut(hm: &Self, orig_key: K, orig_value: &const V) ?V {
+ fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V {
var key = orig_key;
var value = orig_value.*;
const start_index = hm.keyToIndex(key);
@@ -231,7 +231,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
unreachable; // put into a full map
}
- fn internalGet(hm: &const Self, key: K) ?&Entry {
+ fn internalGet(hm: *const Self, key: K) ?*Entry {
const start_index = hm.keyToIndex(key);
{
var roll_over: usize = 0;
@@ -246,7 +246,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return null;
}
- fn keyToIndex(hm: &const Self, key: K) usize {
+ fn keyToIndex(hm: *const Self, key: K) usize {
return usize(hash(key)) % hm.entries.len;
}
};
diff --git a/std/heap.zig b/std/heap.zig
index 8d4938a7c3..81d6f25282 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -16,15 +16,15 @@ var c_allocator_state = Allocator{
.freeFn = cFree,
};
-fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 {
+fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
assert(alignment <= @alignOf(c_longdouble));
- return if (c.malloc(n)) |buf| @ptrCast(&u8, buf)[0..n] else error.OutOfMemory;
+ return if (c.malloc(n)) |buf| @ptrCast(*u8, buf)[0..n] else error.OutOfMemory;
}
-fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
- return @ptrCast(&u8, buf)[0..new_size];
+ return @ptrCast(*u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -32,8 +32,8 @@ fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
}
-fn cFree(self: &Allocator, old_mem: []u8) void {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cFree(self: *Allocator, old_mem: []u8) void {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
@@ -55,7 +55,7 @@ pub const DirectAllocator = struct {
};
}
- pub fn deinit(self: &DirectAllocator) void {
+ pub fn deinit(self: *DirectAllocator) void {
switch (builtin.os) {
Os.windows => if (self.heap_handle) |heap_handle| {
_ = os.windows.HeapDestroy(heap_handle);
@@ -64,7 +64,7 @@ pub const DirectAllocator = struct {
}
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -74,7 +74,7 @@ pub const DirectAllocator = struct {
const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
if (addr == p.MAP_FAILED) return error.OutOfMemory;
- if (alloc_size == n) return @intToPtr(&u8, addr)[0..n];
+ if (alloc_size == n) return @intToPtr(*u8, addr)[0..n];
var aligned_addr = addr & ~usize(alignment - 1);
aligned_addr += alignment;
@@ -93,7 +93,7 @@ pub const DirectAllocator = struct {
//It is impossible that there is an unoccupied page at the top of our
// mmap.
- return @intToPtr(&u8, aligned_addr)[0..n];
+ return @intToPtr(*u8, aligned_addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
@@ -108,14 +108,14 @@ pub const DirectAllocator = struct {
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_addr = root_addr + march_forward_bytes;
const record_addr = adjusted_addr + n;
- @intToPtr(&align(1) usize, record_addr).* = root_addr;
- return @intToPtr(&u8, adjusted_addr)[0..n];
+ @intToPtr(*align(1) usize, record_addr).* = root_addr;
+ return @intToPtr(*u8, adjusted_addr)[0..n];
},
else => @compileError("Unsupported OS"),
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -139,13 +139,13 @@ pub const DirectAllocator = struct {
Os.windows => {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
- const root_addr = @intToPtr(&align(1) usize, old_record_addr).*;
+ const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(os.windows.LPVOID, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
- @intToPtr(&align(1) usize, new_record_addr).* = root_addr;
+ @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
return old_mem[0..new_size];
};
const offset = old_adjusted_addr - root_addr;
@@ -153,14 +153,14 @@ pub const DirectAllocator = struct {
const new_adjusted_addr = new_root_addr + offset;
assert(new_adjusted_addr % alignment == 0);
const new_record_addr = new_adjusted_addr + new_size;
- @intToPtr(&align(1) usize, new_record_addr).* = new_root_addr;
- return @intToPtr(&u8, new_adjusted_addr)[0..new_size];
+ @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
+ return @intToPtr(*u8, new_adjusted_addr)[0..new_size];
},
else => @compileError("Unsupported OS"),
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {
+ fn free(allocator: *Allocator, bytes: []u8) void {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -169,7 +169,7 @@ pub const DirectAllocator = struct {
},
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
- const root_addr = @intToPtr(&align(1) usize, record_addr).*;
+ const root_addr = @intToPtr(*align(1) usize, record_addr).*;
const ptr = @intToPtr(os.windows.LPVOID, root_addr);
_ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
},
@@ -183,13 +183,13 @@ pub const DirectAllocator = struct {
pub const ArenaAllocator = struct {
pub allocator: Allocator,
- child_allocator: &Allocator,
+ child_allocator: *Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
- pub fn init(child_allocator: &Allocator) ArenaAllocator {
+ pub fn init(child_allocator: *Allocator) ArenaAllocator {
return ArenaAllocator{
.allocator = Allocator{
.allocFn = alloc,
@@ -202,7 +202,7 @@ pub const ArenaAllocator = struct {
};
}
- pub fn deinit(self: &ArenaAllocator) void {
+ pub fn deinit(self: *ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
@@ -212,7 +212,7 @@ pub const ArenaAllocator = struct {
}
}
- fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode {
+ fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
@@ -233,7 +233,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
@@ -254,7 +254,7 @@ pub const ArenaAllocator = struct {
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -264,7 +264,7 @@ pub const ArenaAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {}
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
pub const FixedBufferAllocator = struct {
@@ -284,7 +284,7 @@ pub const FixedBufferAllocator = struct {
};
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const rem = @rem(addr, alignment);
@@ -300,7 +300,7 @@ pub const FixedBufferAllocator = struct {
return result;
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -310,7 +310,7 @@ pub const FixedBufferAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {}
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
/// lock free
@@ -331,7 +331,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
};
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
@@ -343,11 +343,11 @@ pub const ThreadSafeFixedBufferAllocator = struct {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index];
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index];
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -357,7 +357,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {}
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
test "c_allocator" {
@@ -403,8 +403,8 @@ test "ThreadSafeFixedBufferAllocator" {
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
-fn testAllocator(allocator: &mem.Allocator) !void {
- var slice = try allocator.alloc(&i32, 100);
+fn testAllocator(allocator: *mem.Allocator) !void {
+ var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
item.* = try allocator.create(i32);
@@ -415,15 +415,15 @@ fn testAllocator(allocator: &mem.Allocator) !void {
allocator.destroy(item);
}
- slice = try allocator.realloc(&i32, slice, 20000);
- slice = try allocator.realloc(&i32, slice, 50);
- slice = try allocator.realloc(&i32, slice, 25);
- slice = try allocator.realloc(&i32, slice, 10);
+ slice = try allocator.realloc(*i32, slice, 20000);
+ slice = try allocator.realloc(*i32, slice, 50);
+ slice = try allocator.realloc(*i32, slice, 25);
+ slice = try allocator.realloc(*i32, slice, 10);
allocator.free(slice);
}
-fn testAllocatorLargeAlignment(allocator: &mem.Allocator) mem.Allocator.Error!void {
+fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (os.page_size << 2 > @maxValue(usize)) return;
diff --git a/std/io.zig b/std/io.zig
index 39d319159e..e20a284e4e 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -34,20 +34,20 @@ pub fn getStdIn() GetStdIoErrs!File {
/// Implementation of InStream trait for File
pub const FileInStream = struct {
- file: &File,
+ file: *File,
stream: Stream,
pub const Error = @typeOf(File.read).ReturnType.ErrorSet;
pub const Stream = InStream(Error);
- pub fn init(file: &File) FileInStream {
+ pub fn init(file: *File) FileInStream {
return FileInStream{
.file = file,
.stream = Stream{ .readFn = readFn },
};
}
- fn readFn(in_stream: &Stream, buffer: []u8) Error!usize {
+ fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
const self = @fieldParentPtr(FileInStream, "stream", in_stream);
return self.file.read(buffer);
}
@@ -55,20 +55,20 @@ pub const FileInStream = struct {
/// Implementation of OutStream trait for File
pub const FileOutStream = struct {
- file: &File,
+ file: *File,
stream: Stream,
pub const Error = File.WriteError;
pub const Stream = OutStream(Error);
- pub fn init(file: &File) FileOutStream {
+ pub fn init(file: *File) FileOutStream {
return FileOutStream{
.file = file,
.stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(FileOutStream, "stream", out_stream);
return self.file.write(bytes);
}
@@ -82,12 +82,12 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- readFn: fn (self: &Self, buffer: []u8) Error!usize,
+ readFn: fn (self: *Self, buffer: []u8) Error!usize,
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
- pub fn readAllBuffer(self: &Self, buffer: &Buffer, max_size: usize) !void {
+ pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
try buffer.resize(0);
var actual_buf_len: usize = 0;
@@ -111,7 +111,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readAllAlloc(self: &Self, allocator: &mem.Allocator, max_size: usize) ![]u8 {
+ pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -123,7 +123,7 @@ pub fn InStream(comptime ReadError: type) type {
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
- pub fn readUntilDelimiterBuffer(self: &Self, buffer: &Buffer, delimiter: u8, max_size: usize) !void {
+ pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
try buffer.resize(0);
while (true) {
@@ -145,7 +145,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readUntilDelimiterAlloc(self: &Self, allocator: &mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
+ pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -156,43 +156,43 @@ pub fn InStream(comptime ReadError: type) type {
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- pub fn read(self: &Self, buffer: []u8) !usize {
+ pub fn read(self: *Self, buffer: []u8) !usize {
return self.readFn(self, buffer);
}
/// Same as `read` but end of stream returns `error.EndOfStream`.
- pub fn readNoEof(self: &Self, buf: []u8) !void {
+ pub fn readNoEof(self: *Self, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
- pub fn readByte(self: &Self) !u8 {
+ pub fn readByte(self: *Self) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
- pub fn readByteSigned(self: &Self) !i8 {
+ pub fn readByteSigned(self: *Self) !i8 {
return @bitCast(i8, try self.readByte());
}
- pub fn readIntLe(self: &Self, comptime T: type) !T {
+ pub fn readIntLe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Little, T);
}
- pub fn readIntBe(self: &Self, comptime T: type) !T {
+ pub fn readIntBe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Big, T);
}
- pub fn readInt(self: &Self, endian: builtin.Endian, comptime T: type) !T {
+ pub fn readInt(self: *Self, endian: builtin.Endian, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(bytes, T, endian);
}
- pub fn readVarInt(self: &Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
+ pub fn readVarInt(self: *Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
assert(size <= @sizeOf(T));
assert(size <= 8);
var input_buf: [8]u8 = undefined;
@@ -208,22 +208,22 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = this;
pub const Error = WriteError;
- writeFn: fn (self: &Self, bytes: []const u8) Error!void,
+ writeFn: fn (self: *Self, bytes: []const u8) Error!void,
- pub fn print(self: &Self, comptime format: []const u8, args: ...) !void {
+ pub fn print(self: *Self, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, Error, self.writeFn, format, args);
}
- pub fn write(self: &Self, bytes: []const u8) !void {
+ pub fn write(self: *Self, bytes: []const u8) !void {
return self.writeFn(self, bytes);
}
- pub fn writeByte(self: &Self, byte: u8) !void {
+ pub fn writeByte(self: *Self, byte: u8) !void {
const slice = (&byte)[0..1];
return self.writeFn(self, slice);
}
- pub fn writeByteNTimes(self: &Self, byte: u8, n: usize) !void {
+ pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void {
const slice = (&byte)[0..1];
var i: usize = 0;
while (i < n) : (i += 1) {
@@ -234,14 +234,14 @@ pub fn OutStream(comptime WriteError: type) type {
}
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
-pub fn writeFile(allocator: &mem.Allocator, path: []const u8, data: []const u8) !void {
+pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void {
var file = try File.openWrite(allocator, path);
defer file.close();
try file.write(data);
}
/// On success, caller owns returned buffer.
-pub fn readFileAlloc(allocator: &mem.Allocator, path: []const u8) ![]u8 {
+pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
var file = try File.openRead(allocator, path);
defer file.close();
@@ -265,13 +265,13 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
pub stream: Stream,
- unbuffered_in_stream: &Stream,
+ unbuffered_in_stream: *Stream,
buffer: [buffer_size]u8,
start_index: usize,
end_index: usize,
- pub fn init(unbuffered_in_stream: &Stream) Self {
+ pub fn init(unbuffered_in_stream: *Stream) Self {
return Self{
.unbuffered_in_stream = unbuffered_in_stream,
.buffer = undefined,
@@ -287,7 +287,7 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
};
}
- fn readFn(in_stream: &Stream, dest: []u8) !usize {
+ fn readFn(in_stream: *Stream, dest: []u8) !usize {
const self = @fieldParentPtr(Self, "stream", in_stream);
var dest_index: usize = 0;
@@ -338,12 +338,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
pub stream: Stream,
- unbuffered_out_stream: &Stream,
+ unbuffered_out_stream: *Stream,
buffer: [buffer_size]u8,
index: usize,
- pub fn init(unbuffered_out_stream: &Stream) Self {
+ pub fn init(unbuffered_out_stream: *Stream) Self {
return Self{
.unbuffered_out_stream = unbuffered_out_stream,
.buffer = undefined,
@@ -352,12 +352,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
};
}
- pub fn flush(self: &Self) !void {
+ pub fn flush(self: *Self) !void {
try self.unbuffered_out_stream.write(self.buffer[0..self.index]);
self.index = 0;
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(Self, "stream", out_stream);
if (bytes.len >= self.buffer.len) {
@@ -383,20 +383,20 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
/// Implementation of OutStream trait for Buffer
pub const BufferOutStream = struct {
- buffer: &Buffer,
+ buffer: *Buffer,
stream: Stream,
pub const Error = error{OutOfMemory};
pub const Stream = OutStream(Error);
- pub fn init(buffer: &Buffer) BufferOutStream {
+ pub fn init(buffer: *Buffer) BufferOutStream {
return BufferOutStream{
.buffer = buffer,
.stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(BufferOutStream, "stream", out_stream);
return self.buffer.append(bytes);
}
@@ -407,7 +407,7 @@ pub const BufferedAtomicFile = struct {
file_stream: FileOutStream,
buffered_stream: BufferedOutStream(FileOutStream.Error),
- pub fn create(allocator: &mem.Allocator, dest_path: []const u8) !&BufferedAtomicFile {
+ pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
var self = try allocator.create(BufferedAtomicFile);
errdefer allocator.destroy(self);
@@ -427,18 +427,18 @@ pub const BufferedAtomicFile = struct {
}
/// always call destroy, even after successful finish()
- pub fn destroy(self: &BufferedAtomicFile) void {
+ pub fn destroy(self: *BufferedAtomicFile) void {
const allocator = self.atomic_file.allocator;
self.atomic_file.deinit();
allocator.destroy(self);
}
- pub fn finish(self: &BufferedAtomicFile) !void {
+ pub fn finish(self: *BufferedAtomicFile) !void {
try self.buffered_stream.flush();
try self.atomic_file.finish();
}
- pub fn stream(self: &BufferedAtomicFile) &OutStream(FileOutStream.Error) {
+ pub fn stream(self: *BufferedAtomicFile) *OutStream(FileOutStream.Error) {
return &self.buffered_stream.stream;
}
};
diff --git a/std/json.zig b/std/json.zig
index 9de8f0b53e..c8aef7688b 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -76,7 +76,7 @@ pub const Token = struct {
}
// Slice into the underlying input string.
- pub fn slice(self: &const Token, input: []const u8, i: usize) []const u8 {
+ pub fn slice(self: *const Token, input: []const u8, i: usize) []const u8 {
return input[i + self.offset - self.count .. i + self.offset];
}
};
@@ -115,7 +115,7 @@ pub const StreamingJsonParser = struct {
return p;
}
- pub fn reset(p: &StreamingJsonParser) void {
+ pub fn reset(p: *StreamingJsonParser) void {
p.state = State.TopLevelBegin;
p.count = 0;
// Set before ever read in main transition function
@@ -205,7 +205,7 @@ pub const StreamingJsonParser = struct {
// tokens. token2 is always null if token1 is null.
//
// There is currently no error recovery on a bad stream.
- pub fn feed(p: &StreamingJsonParser, c: u8, token1: &?Token, token2: &?Token) Error!void {
+ pub fn feed(p: *StreamingJsonParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
token1.* = null;
token2.* = null;
p.count += 1;
@@ -217,7 +217,7 @@ pub const StreamingJsonParser = struct {
}
// Perform a single transition on the state machine and return any possible token.
- fn transition(p: &StreamingJsonParser, c: u8, token: &?Token) Error!bool {
+ fn transition(p: *StreamingJsonParser, c: u8, token: *?Token) Error!bool {
switch (p.state) {
State.TopLevelBegin => switch (c) {
'{' => {
@@ -861,7 +861,7 @@ pub fn validate(s: []const u8) bool {
var token1: ?Token = undefined;
var token2: ?Token = undefined;
- p.feed(c, &token1, &token2) catch |err| {
+ p.feed(c, &token1, &token2) catch |err| {
return false;
};
}
@@ -878,7 +878,7 @@ pub const ValueTree = struct {
arena: ArenaAllocator,
root: Value,
- pub fn deinit(self: &ValueTree) void {
+ pub fn deinit(self: *ValueTree) void {
self.arena.deinit();
}
};
@@ -894,7 +894,7 @@ pub const Value = union(enum) {
Array: ArrayList(Value),
Object: ObjectMap,
- pub fn dump(self: &const Value) void {
+ pub fn dump(self: *const Value) void {
switch (self.*) {
Value.Null => {
std.debug.warn("null");
@@ -941,7 +941,7 @@ pub const Value = union(enum) {
}
}
- pub fn dumpIndent(self: &const Value, indent: usize) void {
+ pub fn dumpIndent(self: *const Value, indent: usize) void {
if (indent == 0) {
self.dump();
} else {
@@ -949,7 +949,7 @@ pub const Value = union(enum) {
}
}
- fn dumpIndentLevel(self: &const Value, indent: usize, level: usize) void {
+ fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void {
switch (self.*) {
Value.Null => {
std.debug.warn("null");
@@ -1013,7 +1013,7 @@ pub const Value = union(enum) {
// A non-stream JSON parser which constructs a tree of Value's.
pub const JsonParser = struct {
- allocator: &Allocator,
+ allocator: *Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@@ -1026,7 +1026,7 @@ pub const JsonParser = struct {
Simple,
};
- pub fn init(allocator: &Allocator, copy_strings: bool) JsonParser {
+ pub fn init(allocator: *Allocator, copy_strings: bool) JsonParser {
return JsonParser{
.allocator = allocator,
.state = State.Simple,
@@ -1035,16 +1035,16 @@ pub const JsonParser = struct {
};
}
- pub fn deinit(p: &JsonParser) void {
+ pub fn deinit(p: *JsonParser) void {
p.stack.deinit();
}
- pub fn reset(p: &JsonParser) void {
+ pub fn reset(p: *JsonParser) void {
p.state = State.Simple;
p.stack.shrink(0);
}
- pub fn parse(p: &JsonParser, input: []const u8) !ValueTree {
+ pub fn parse(p: *JsonParser, input: []const u8) !ValueTree {
var mp = StreamingJsonParser.init();
var arena = ArenaAllocator.init(p.allocator);
@@ -1090,7 +1090,7 @@ pub const JsonParser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
- fn transition(p: &JsonParser, allocator: &Allocator, input: []const u8, i: usize, token: &const Token) !void {
+ fn transition(p: *JsonParser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
switch (p.state) {
State.ObjectKey => switch (token.id) {
Token.Id.ObjectEnd => {
@@ -1223,7 +1223,7 @@ pub const JsonParser = struct {
}
}
- fn pushToParent(p: &JsonParser, value: &const Value) !void {
+ fn pushToParent(p: *JsonParser, value: *const Value) !void {
switch (p.stack.at(p.stack.len - 1)) {
// Object Parent -> [ ..., object, , value ]
Value.String => |key| {
@@ -1244,14 +1244,14 @@ pub const JsonParser = struct {
}
}
- fn parseString(p: &JsonParser, allocator: &Allocator, token: &const Token, input: []const u8, i: usize) !Value {
+ fn parseString(p: *JsonParser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
// TODO: We don't strictly have to copy values which do not contain any escape
// characters if flagged with the option.
const slice = token.slice(input, i);
return Value{ .String = try mem.dupe(p.allocator, u8, slice) };
}
- fn parseNumber(p: &JsonParser, token: &const Token, input: []const u8, i: usize) !Value {
+ fn parseNumber(p: *JsonParser, token: *const Token, input: []const u8, i: usize) !Value {
return if (token.number_is_integer)
Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) }
else
diff --git a/std/linked_list.zig b/std/linked_list.zig
index c6be08171e..fbc0a0c42a 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -21,11 +21,11 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
- prev: ?&Node,
- next: ?&Node,
+ prev: ?*Node,
+ next: ?*Node,
data: T,
- pub fn init(value: &const T) Node {
+ pub fn init(value: *const T) Node {
return Node{
.prev = null,
.next = null,
@@ -38,14 +38,14 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
return Node.init({});
}
- pub fn toData(node: &Node) &ParentType {
+ pub fn toData(node: *Node) *ParentType {
comptime assert(isIntrusive());
return @fieldParentPtr(ParentType, field_name, node);
}
};
- first: ?&Node,
- last: ?&Node,
+ first: ?*Node,
+ last: ?*Node,
len: usize,
/// Initialize a linked list.
@@ -69,7 +69,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
- pub fn insertAfter(list: &Self, node: &Node, new_node: &Node) void {
+ pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void {
new_node.prev = node;
if (node.next) |next_node| {
// Intermediate node.
@@ -90,7 +90,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
- pub fn insertBefore(list: &Self, node: &Node, new_node: &Node) void {
+ pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void {
new_node.next = node;
if (node.prev) |prev_node| {
// Intermediate node.
@@ -110,7 +110,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
- pub fn append(list: &Self, new_node: &Node) void {
+ pub fn append(list: *Self, new_node: *Node) void {
if (list.last) |last| {
// Insert after last.
list.insertAfter(last, new_node);
@@ -124,7 +124,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
- pub fn prepend(list: &Self, new_node: &Node) void {
+ pub fn prepend(list: *Self, new_node: *Node) void {
if (list.first) |first| {
// Insert before first.
list.insertBefore(first, new_node);
@@ -143,7 +143,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// node: Pointer to the node to be removed.
- pub fn remove(list: &Self, node: &Node) void {
+ pub fn remove(list: *Self, node: *Node) void {
if (node.prev) |prev_node| {
// Intermediate node.
prev_node.next = node.next;
@@ -168,7 +168,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the last node in the list.
- pub fn pop(list: &Self) ?&Node {
+ pub fn pop(list: *Self) ?*Node {
const last = list.last ?? return null;
list.remove(last);
return last;
@@ -178,7 +178,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the first node in the list.
- pub fn popFirst(list: &Self) ?&Node {
+ pub fn popFirst(list: *Self) ?*Node {
const first = list.first ?? return null;
list.remove(first);
return first;
@@ -191,7 +191,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn allocateNode(list: &Self, allocator: &Allocator) !&Node {
+ pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
return allocator.create(Node);
}
@@ -201,7 +201,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
- pub fn destroyNode(list: &Self, node: &Node, allocator: &Allocator) void {
+ pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
comptime assert(!isIntrusive());
allocator.destroy(node);
}
@@ -214,7 +214,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn createNode(list: &Self, data: &const T, allocator: &Allocator) !&Node {
+ pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
var node = try list.allocateNode(allocator);
node.* = Node.init(data);
diff --git a/std/macho.zig b/std/macho.zig
index 615569e4b4..e71ac76b1a 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -42,13 +42,13 @@ pub const Symbol = struct {
name: []const u8,
address: u64,
- fn addressLessThan(lhs: &const Symbol, rhs: &const Symbol) bool {
+ fn addressLessThan(lhs: *const Symbol, rhs: *const Symbol) bool {
return lhs.address < rhs.address;
}
};
pub const SymbolTable = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
symbols: []const Symbol,
strings: []const u8,
@@ -56,7 +56,7 @@ pub const SymbolTable = struct {
// Ideally we'd use _mh_execute_header because it's always at 0x100000000
// in the image but as it's located in a different section than executable
// code, its displacement is different.
- pub fn deinit(self: &SymbolTable) void {
+ pub fn deinit(self: *SymbolTable) void {
self.allocator.free(self.symbols);
self.symbols = []const Symbol{};
@@ -64,7 +64,7 @@ pub const SymbolTable = struct {
self.strings = []const u8{};
}
- pub fn search(self: &const SymbolTable, address: usize) ?&const Symbol {
+ pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol {
var min: usize = 0;
var max: usize = self.symbols.len - 1; // Exclude sentinel.
while (min < max) {
@@ -83,7 +83,7 @@ pub const SymbolTable = struct {
}
};
-pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable {
+pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable {
var file = in.file;
try file.seekTo(0);
@@ -160,13 +160,13 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable
};
}
-fn readNoEof(in: &io.FileInStream, comptime T: type, result: []T) !void {
+fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
return in.stream.readNoEof(([]u8)(result));
}
-fn readOneNoEof(in: &io.FileInStream, comptime T: type, result: &T) !void {
+fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
return readNoEof(in, T, result[0..1]);
}
-fn isSymbol(sym: &const Nlist64) bool {
+fn isSymbol(sym: *const Nlist64) bool {
return sym.n_value != 0 and sym.n_desc == 0;
}
diff --git a/std/math/complex/atan.zig b/std/math/complex/atan.zig
index b7bbf930eb..9bfe5fe724 100644
--- a/std/math/complex/atan.zig
+++ b/std/math/complex/atan.zig
@@ -29,7 +29,7 @@ fn redupif32(x: f32) f32 {
return ((x - u * DP1) - u * DP2) - t * DP3;
}
-fn atan32(z: &const Complex(f32)) Complex(f32) {
+fn atan32(z: *const Complex(f32)) Complex(f32) {
const maxnum = 1.0e38;
const x = z.re;
@@ -78,7 +78,7 @@ fn redupif64(x: f64) f64 {
return ((x - u * DP1) - u * DP2) - t * DP3;
}
-fn atan64(z: &const Complex(f64)) Complex(f64) {
+fn atan64(z: *const Complex(f64)) Complex(f64) {
const maxnum = 1.0e308;
const x = z.re;
diff --git a/std/math/complex/cosh.zig b/std/math/complex/cosh.zig
index 96eac68556..c2f9a47b8d 100644
--- a/std/math/complex/cosh.zig
+++ b/std/math/complex/cosh.zig
@@ -15,7 +15,7 @@ pub fn cosh(z: var) Complex(@typeOf(z.re)) {
};
}
-fn cosh32(z: &const Complex(f32)) Complex(f32) {
+fn cosh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -78,7 +78,7 @@ fn cosh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
}
-fn cosh64(z: &const Complex(f64)) Complex(f64) {
+fn cosh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
diff --git a/std/math/complex/exp.zig b/std/math/complex/exp.zig
index 8fe069a43d..44c354f246 100644
--- a/std/math/complex/exp.zig
+++ b/std/math/complex/exp.zig
@@ -16,7 +16,7 @@ pub fn exp(z: var) Complex(@typeOf(z.re)) {
};
}
-fn exp32(z: &const Complex(f32)) Complex(f32) {
+fn exp32(z: *const Complex(f32)) Complex(f32) {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955
@@ -63,7 +63,7 @@ fn exp32(z: &const Complex(f32)) Complex(f32) {
}
}
-fn exp64(z: &const Complex(f64)) Complex(f64) {
+fn exp64(z: *const Complex(f64)) Complex(f64) {
const exp_overflow = 0x40862e42; // high bits of max_exp * ln2 ~= 710
const cexp_overflow = 0x4096b8e4; // (max_exp - min_denorm_exp) * ln2
diff --git a/std/math/complex/index.zig b/std/math/complex/index.zig
index 5902ffaa19..b00296beda 100644
--- a/std/math/complex/index.zig
+++ b/std/math/complex/index.zig
@@ -37,28 +37,28 @@ pub fn Complex(comptime T: type) type {
};
}
- pub fn add(self: &const Self, other: &const Self) Self {
+ pub fn add(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re + other.re,
.im = self.im + other.im,
};
}
- pub fn sub(self: &const Self, other: &const Self) Self {
+ pub fn sub(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re - other.re,
.im = self.im - other.im,
};
}
- pub fn mul(self: &const Self, other: &const Self) Self {
+ pub fn mul(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re * other.re - self.im * other.im,
.im = self.im * other.re + self.re * other.im,
};
}
- pub fn div(self: &const Self, other: &const Self) Self {
+ pub fn div(self: *const Self, other: *const Self) Self {
const re_num = self.re * other.re + self.im * other.im;
const im_num = self.im * other.re - self.re * other.im;
const den = other.re * other.re + other.im * other.im;
@@ -69,14 +69,14 @@ pub fn Complex(comptime T: type) type {
};
}
- pub fn conjugate(self: &const Self) Self {
+ pub fn conjugate(self: *const Self) Self {
return Self{
.re = self.re,
.im = -self.im,
};
}
- pub fn reciprocal(self: &const Self) Self {
+ pub fn reciprocal(self: *const Self) Self {
const m = self.re * self.re + self.im * self.im;
return Self{
.re = self.re / m,
@@ -84,7 +84,7 @@ pub fn Complex(comptime T: type) type {
};
}
- pub fn magnitude(self: &const Self) T {
+ pub fn magnitude(self: *const Self) T {
return math.sqrt(self.re * self.re + self.im * self.im);
}
};
diff --git a/std/math/complex/ldexp.zig b/std/math/complex/ldexp.zig
index 7ebefff40c..a56c2ef2eb 100644
--- a/std/math/complex/ldexp.zig
+++ b/std/math/complex/ldexp.zig
@@ -14,7 +14,7 @@ pub fn ldexp_cexp(z: var, expt: i32) Complex(@typeOf(z.re)) {
};
}
-fn frexp_exp32(x: f32, expt: &i32) f32 {
+fn frexp_exp32(x: f32, expt: *i32) f32 {
const k = 235; // reduction constant
const kln2 = 162.88958740; // k * ln2
@@ -24,7 +24,7 @@ fn frexp_exp32(x: f32, expt: &i32) f32 {
return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23));
}
-fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) {
+fn ldexp_cexp32(z: *const Complex(f32), expt: i32) Complex(f32) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp32(z.re, &ex_expt);
const exptf = expt + ex_expt;
@@ -38,7 +38,7 @@ fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) {
return Complex(f32).new(math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2);
}
-fn frexp_exp64(x: f64, expt: &i32) f64 {
+fn frexp_exp64(x: f64, expt: *i32) f64 {
const k = 1799; // reduction constant
const kln2 = 1246.97177782734161156; // k * ln2
@@ -54,7 +54,7 @@ fn frexp_exp64(x: f64, expt: &i32) f64 {
return @bitCast(f64, (u64(high_word) << 32) | lx);
}
-fn ldexp_cexp64(z: &const Complex(f64), expt: i32) Complex(f64) {
+fn ldexp_cexp64(z: *const Complex(f64), expt: i32) Complex(f64) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp64(z.re, &ex_expt);
const exptf = i64(expt + ex_expt);
diff --git a/std/math/complex/pow.zig b/std/math/complex/pow.zig
index bef9fde542..4c2cd9cf34 100644
--- a/std/math/complex/pow.zig
+++ b/std/math/complex/pow.zig
@@ -4,7 +4,7 @@ const math = std.math;
const cmath = math.complex;
const Complex = cmath.Complex;
-pub fn pow(comptime T: type, z: &const T, c: &const T) T {
+pub fn pow(comptime T: type, z: *const T, c: *const T) T {
const p = cmath.log(z);
const q = c.mul(p);
return cmath.exp(q);
diff --git a/std/math/complex/sinh.zig b/std/math/complex/sinh.zig
index 09a62ca058..3d196bfd50 100644
--- a/std/math/complex/sinh.zig
+++ b/std/math/complex/sinh.zig
@@ -15,7 +15,7 @@ pub fn sinh(z: var) Complex(@typeOf(z.re)) {
};
}
-fn sinh32(z: &const Complex(f32)) Complex(f32) {
+fn sinh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -78,7 +78,7 @@ fn sinh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
}
-fn sinh64(z: &const Complex(f64)) Complex(f64) {
+fn sinh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
diff --git a/std/math/complex/sqrt.zig b/std/math/complex/sqrt.zig
index afda69f7c9..d4f5a67528 100644
--- a/std/math/complex/sqrt.zig
+++ b/std/math/complex/sqrt.zig
@@ -15,7 +15,7 @@ pub fn sqrt(z: var) Complex(@typeOf(z.re)) {
};
}
-fn sqrt32(z: &const Complex(f32)) Complex(f32) {
+fn sqrt32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -57,7 +57,7 @@ fn sqrt32(z: &const Complex(f32)) Complex(f32) {
}
}
-fn sqrt64(z: &const Complex(f64)) Complex(f64) {
+fn sqrt64(z: *const Complex(f64)) Complex(f64) {
// may encounter overflow for im,re >= DBL_MAX / (1 + sqrt(2))
const threshold = 0x1.a827999fcef32p+1022;
diff --git a/std/math/complex/tanh.zig b/std/math/complex/tanh.zig
index 34250b1b4a..1d754838a3 100644
--- a/std/math/complex/tanh.zig
+++ b/std/math/complex/tanh.zig
@@ -13,7 +13,7 @@ pub fn tanh(z: var) Complex(@typeOf(z.re)) {
};
}
-fn tanh32(z: &const Complex(f32)) Complex(f32) {
+fn tanh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -51,7 +51,7 @@ fn tanh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((beta * rho * s) / den, t / den);
}
-fn tanh64(z: &const Complex(f64)) Complex(f64) {
+fn tanh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
diff --git a/std/math/hypot.zig b/std/math/hypot.zig
index fe0de3a1ea..494df22ba6 100644
--- a/std/math/hypot.zig
+++ b/std/math/hypot.zig
@@ -52,7 +52,7 @@ fn hypot32(x: f32, y: f32) f32 {
return z * math.sqrt(f32(f64(x) * x + f64(y) * y));
}
-fn sq(hi: &f64, lo: &f64, x: f64) void {
+fn sq(hi: *f64, lo: *f64, x: f64) void {
const split: f64 = 0x1.0p27 + 1.0;
const xc = x * split;
const xh = x - xc + xc;
diff --git a/std/math/index.zig b/std/math/index.zig
index 847e972500..33bc1082f7 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -46,12 +46,12 @@ pub fn forceEval(value: var) void {
switch (T) {
f32 => {
var x: f32 = undefined;
- const p = @ptrCast(&volatile f32, &x);
+ const p = @ptrCast(*volatile f32, &x);
p.* = x;
},
f64 => {
var x: f64 = undefined;
- const p = @ptrCast(&volatile f64, &x);
+ const p = @ptrCast(*volatile f64, &x);
p.* = x;
},
else => {
diff --git a/std/mem.zig b/std/mem.zig
index f4696cff9f..aec24e8491 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -13,7 +13,7 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
- allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
+ allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
@@ -26,22 +26,22 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
- reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
+ reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
- freeFn: fn (self: &Allocator, old_mem: []u8) void,
+ freeFn: fn (self: *Allocator, old_mem: []u8) void,
- fn create(self: &Allocator, comptime T: type) !&T {
- if (@sizeOf(T) == 0) return &{};
+ fn create(self: *Allocator, comptime T: type) !*T {
+ if (@sizeOf(T) == 0) return &{};
const slice = try self.alloc(T, 1);
return &slice[0];
}
// TODO once #733 is solved, this will replace create
- fn construct(self: &Allocator, init: var) t: {
+ fn construct(self: *Allocator, init: var) t: {
// TODO this is a workaround for type getting parsed as Error!&const T
const T = @typeOf(init).Child;
- break :t Error!&T;
+ break :t Error!*T;
} {
const T = @typeOf(init).Child;
if (@sizeOf(T) == 0) return &{};
@@ -51,17 +51,17 @@ pub const Allocator = struct {
return ptr;
}
- fn destroy(self: &Allocator, ptr: var) void {
+ fn destroy(self: *Allocator, ptr: var) void {
self.free(ptr[0..1]);
}
- fn alloc(self: &Allocator, comptime T: type, n: usize) ![]T {
+ fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
- fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
+ fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
if (n == 0) {
- return (&align(alignment) T)(undefined)[0..0];
+ return (*align(alignment) T)(undefined)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
@@ -73,17 +73,17 @@ pub const Allocator = struct {
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
- fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
+ fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
- fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
+ fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
if (old_mem.len == 0) {
return self.alloc(T, n);
}
if (n == 0) {
self.free(old_mem);
- return (&align(alignment) T)(undefined)[0..0];
+ return (*align(alignment) T)(undefined)[0..0];
}
const old_byte_slice = ([]u8)(old_mem);
@@ -102,11 +102,11 @@ pub const Allocator = struct {
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
/// Unlike `realloc`, this function cannot fail.
/// Shrinking to 0 is the same as calling `free`.
- fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) []T {
+ fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
- fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
+ fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
if (n == 0) {
self.free(old_mem);
return old_mem[0..0];
@@ -123,10 +123,10 @@ pub const Allocator = struct {
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
- fn free(self: &Allocator, memory: var) void {
+ fn free(self: *Allocator, memory: var) void {
const bytes = ([]const u8)(memory);
if (bytes.len == 0) return;
- const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr));
+ const non_const_ptr = @intToPtr(*u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
}
};
@@ -186,7 +186,7 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
}
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
-pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
@@ -457,7 +457,7 @@ pub const SplitIterator = struct {
split_bytes: []const u8,
index: usize,
- pub fn next(self: &SplitIterator) ?[]const u8 {
+ pub fn next(self: *SplitIterator) ?[]const u8 {
// move to beginning of token
while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
const start = self.index;
@@ -473,14 +473,14 @@ pub const SplitIterator = struct {
}
/// Returns a slice of the remaining bytes. Does not affect iterator state.
- pub fn rest(self: &const SplitIterator) []const u8 {
+ pub fn rest(self: *const SplitIterator) []const u8 {
// move to beginning of token
var index: usize = self.index;
while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {}
return self.buffer[index..];
}
- fn isSplitByte(self: &const SplitIterator, byte: u8) bool {
+ fn isSplitByte(self: *const SplitIterator, byte: u8) bool {
for (self.split_bytes) |split_byte| {
if (byte == split_byte) {
return true;
@@ -492,7 +492,7 @@ pub const SplitIterator = struct {
/// Naively combines a series of strings with a separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: &Allocator, sep: u8, strings: ...) ![]u8 {
+pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
comptime assert(strings.len >= 1);
var total_strings_len: usize = strings.len; // 1 sep per string
{
@@ -649,7 +649,7 @@ test "mem.max" {
assert(max(u8, "abcdefg") == 'g');
}
-pub fn swap(comptime T: type, a: &T, b: &T) void {
+pub fn swap(comptime T: type, a: *T, b: *T) void {
const tmp = a.*;
a.* = b.*;
b.* = tmp;
diff --git a/std/net.zig b/std/net.zig
index 3af4e0b525..bfe4b1c2a0 100644
--- a/std/net.zig
+++ b/std/net.zig
@@ -31,7 +31,7 @@ pub const Address = struct {
};
}
- pub fn initIp6(ip6: &const Ip6Addr, port: u16) Address {
+ pub fn initIp6(ip6: *const Ip6Addr, port: u16) Address {
return Address{
.family = posix.AF_INET6,
.os_addr = posix.sockaddr{
@@ -46,15 +46,15 @@ pub const Address = struct {
};
}
- pub fn initPosix(addr: &const posix.sockaddr) Address {
+ pub fn initPosix(addr: *const posix.sockaddr) Address {
return Address{ .os_addr = addr.* };
}
- pub fn format(self: &const Address, out_stream: var) !void {
+ pub fn format(self: *const Address, out_stream: var) !void {
switch (self.os_addr.in.family) {
posix.AF_INET => {
const native_endian_port = std.mem.endianSwapIfLe(u16, self.os_addr.in.port);
- const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
+ const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
try out_stream.print("{}.{}.{}.{}:{}", bytes[0], bytes[1], bytes[2], bytes[3], native_endian_port);
},
posix.AF_INET6 => {
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 51f1bd96e5..30a2fd1408 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -20,7 +20,7 @@ pub const ChildProcess = struct {
pub handle: if (is_windows) windows.HANDLE else void,
pub thread_handle: if (is_windows) windows.HANDLE else void,
- pub allocator: &mem.Allocator,
+ pub allocator: *mem.Allocator,
pub stdin: ?os.File,
pub stdout: ?os.File,
@@ -31,7 +31,7 @@ pub const ChildProcess = struct {
pub argv: []const []const u8,
/// Leave as null to use the current env map using the supplied allocator.
- pub env_map: ?&const BufMap,
+ pub env_map: ?*const BufMap,
pub stdin_behavior: StdIo,
pub stdout_behavior: StdIo,
@@ -47,7 +47,7 @@ pub const ChildProcess = struct {
pub cwd: ?[]const u8,
err_pipe: if (is_windows) void else [2]i32,
- llnode: if (is_windows) void else LinkedList(&ChildProcess).Node,
+ llnode: if (is_windows) void else LinkedList(*ChildProcess).Node,
pub const SpawnError = error{
ProcessFdQuotaExceeded,
@@ -84,7 +84,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
- pub fn init(argv: []const []const u8, allocator: &mem.Allocator) !&ChildProcess {
+ pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
errdefer allocator.destroy(child);
@@ -114,14 +114,14 @@ pub const ChildProcess = struct {
return child;
}
- pub fn setUserName(self: &ChildProcess, name: []const u8) !void {
+ pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
const user_info = try os.getUserInfo(name);
self.uid = user_info.uid;
self.gid = user_info.gid;
}
/// On success must call `kill` or `wait`.
- pub fn spawn(self: &ChildProcess) !void {
+ pub fn spawn(self: *ChildProcess) !void {
if (is_windows) {
return self.spawnWindows();
} else {
@@ -129,13 +129,13 @@ pub const ChildProcess = struct {
}
}
- pub fn spawnAndWait(self: &ChildProcess) !Term {
+ pub fn spawnAndWait(self: *ChildProcess) !Term {
try self.spawn();
return self.wait();
}
/// Forcibly terminates child process and then cleans up all resources.
- pub fn kill(self: &ChildProcess) !Term {
+ pub fn kill(self: *ChildProcess) !Term {
if (is_windows) {
return self.killWindows(1);
} else {
@@ -143,7 +143,7 @@ pub const ChildProcess = struct {
}
}
- pub fn killWindows(self: &ChildProcess, exit_code: windows.UINT) !Term {
+ pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -159,7 +159,7 @@ pub const ChildProcess = struct {
return ??self.term;
}
- pub fn killPosix(self: &ChildProcess) !Term {
+ pub fn killPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -179,7 +179,7 @@ pub const ChildProcess = struct {
}
/// Blocks until child process terminates and then cleans up all resources.
- pub fn wait(self: &ChildProcess) !Term {
+ pub fn wait(self: *ChildProcess) !Term {
if (is_windows) {
return self.waitWindows();
} else {
@@ -195,7 +195,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
- pub fn exec(allocator: &mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?&const BufMap, max_output_size: usize) !ExecResult {
+ pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
defer child.deinit();
@@ -225,7 +225,7 @@ pub const ChildProcess = struct {
};
}
- fn waitWindows(self: &ChildProcess) !Term {
+ fn waitWindows(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -235,7 +235,7 @@ pub const ChildProcess = struct {
return ??self.term;
}
- fn waitPosix(self: &ChildProcess) !Term {
+ fn waitPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -245,16 +245,16 @@ pub const ChildProcess = struct {
return ??self.term;
}
- pub fn deinit(self: &ChildProcess) void {
+ pub fn deinit(self: *ChildProcess) void {
self.allocator.destroy(self);
}
- fn waitUnwrappedWindows(self: &ChildProcess) !void {
+ fn waitUnwrappedWindows(self: *ChildProcess) !void {
const result = os.windowsWaitSingle(self.handle, windows.INFINITE);
self.term = (SpawnError!Term)(x: {
var exit_code: windows.DWORD = undefined;
- if (windows.GetExitCodeProcess(self.handle, &exit_code) == 0) {
+ if (windows.GetExitCodeProcess(self.handle, &exit_code) == 0) {
break :x Term{ .Unknown = 0 };
} else {
break :x Term{ .Exited = @bitCast(i32, exit_code) };
@@ -267,7 +267,7 @@ pub const ChildProcess = struct {
return result;
}
- fn waitUnwrapped(self: &ChildProcess) void {
+ fn waitUnwrapped(self: *ChildProcess) void {
var status: i32 = undefined;
while (true) {
const err = posix.getErrno(posix.waitpid(self.pid, &status, 0));
@@ -283,11 +283,11 @@ pub const ChildProcess = struct {
}
}
- fn handleWaitResult(self: &ChildProcess, status: i32) void {
+ fn handleWaitResult(self: *ChildProcess, status: i32) void {
self.term = self.cleanupAfterWait(status);
}
- fn cleanupStreams(self: &ChildProcess) void {
+ fn cleanupStreams(self: *ChildProcess) void {
if (self.stdin) |*stdin| {
stdin.close();
self.stdin = null;
@@ -302,7 +302,7 @@ pub const ChildProcess = struct {
}
}
- fn cleanupAfterWait(self: &ChildProcess, status: i32) !Term {
+ fn cleanupAfterWait(self: *ChildProcess, status: i32) !Term {
defer {
os.close(self.err_pipe[0]);
os.close(self.err_pipe[1]);
@@ -335,7 +335,7 @@ pub const ChildProcess = struct {
Term{ .Unknown = status };
}
- fn spawnPosix(self: &ChildProcess) !void {
+ fn spawnPosix(self: *ChildProcess) !void {
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try makePipe() else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
@@ -432,7 +432,7 @@ pub const ChildProcess = struct {
self.pid = pid;
self.err_pipe = err_pipe;
- self.llnode = LinkedList(&ChildProcess).Node.init(self);
+ self.llnode = LinkedList(*ChildProcess).Node.init(self);
self.term = null;
if (self.stdin_behavior == StdIo.Pipe) {
@@ -446,7 +446,7 @@ pub const ChildProcess = struct {
}
}
- fn spawnWindows(self: &ChildProcess) !void {
+ fn spawnWindows(self: *ChildProcess) !void {
const saAttr = windows.SECURITY_ATTRIBUTES{
.nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
.bInheritHandle = windows.TRUE,
@@ -639,8 +639,8 @@ pub const ChildProcess = struct {
}
};
-fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?&u8, lpStartupInfo: &windows.STARTUPINFOA, lpProcessInformation: &windows.PROCESS_INFORMATION) !void {
- if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?&c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
+fn windowsCreateProcess(app_name: *u8, cmd_line: *u8, envp_ptr: ?*u8, cwd_ptr: ?*u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void {
+ if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?*c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.FILE_NOT_FOUND, windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
@@ -653,7 +653,7 @@ fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?
/// Caller must dealloc.
/// Guarantees a null byte at result[result.len].
-fn windowsCreateCommandLine(allocator: &mem.Allocator, argv: []const []const u8) ![]u8 {
+fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -698,7 +698,7 @@ fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void {
// a namespace field lookup
const SECURITY_ATTRIBUTES = windows.SECURITY_ATTRIBUTES;
-fn windowsMakePipe(rd: &windows.HANDLE, wr: &windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipe(rd: *windows.HANDLE, wr: *windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
if (windows.CreatePipe(rd, wr, sattr, 0) == 0) {
const err = windows.GetLastError();
return switch (err) {
@@ -716,7 +716,7 @@ fn windowsSetHandleInfo(h: windows.HANDLE, mask: windows.DWORD, flags: windows.D
}
}
-fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
@@ -726,7 +726,7 @@ fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const S
wr.* = wr_h;
}
-fn windowsMakePipeOut(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipeOut(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
@@ -748,7 +748,7 @@ fn makePipe() ![2]i32 {
return fds;
}
-fn destroyPipe(pipe: &const [2]i32) void {
+fn destroyPipe(pipe: *const [2]i32) void {
os.close((pipe.*)[0]);
os.close((pipe.*)[1]);
}
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index a3fc230ac5..77e8b6bb6a 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -309,7 +309,7 @@ pub fn isatty(fd: i32) bool {
return c.isatty(fd) != 0;
}
-pub fn fstat(fd: i32, buf: &c.Stat) usize {
+pub fn fstat(fd: i32, buf: *c.Stat) usize {
return errnoWrap(c.@"fstat$INODE64"(fd, buf));
}
@@ -317,7 +317,7 @@ pub fn lseek(fd: i32, offset: isize, whence: c_int) usize {
return errnoWrap(c.lseek(fd, offset, whence));
}
-pub fn open(path: &const u8, flags: u32, mode: usize) usize {
+pub fn open(path: *const u8, flags: u32, mode: usize) usize {
return errnoWrap(c.open(path, @bitCast(c_int, flags), mode));
}
@@ -325,79 +325,79 @@ pub fn raise(sig: i32) usize {
return errnoWrap(c.raise(sig));
}
-pub fn read(fd: i32, buf: &u8, nbyte: usize) usize {
- return errnoWrap(c.read(fd, @ptrCast(&c_void, buf), nbyte));
+pub fn read(fd: i32, buf: *u8, nbyte: usize) usize {
+ return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
-pub fn stat(noalias path: &const u8, noalias buf: &stat) usize {
+pub fn stat(noalias path: *const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf));
}
-pub fn write(fd: i32, buf: &const u8, nbyte: usize) usize {
- return errnoWrap(c.write(fd, @ptrCast(&const c_void, buf), nbyte));
+pub fn write(fd: i32, buf: *const u8, nbyte: usize) usize {
+ return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
-pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
- const ptr_result = c.mmap(@ptrCast(&c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
+pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
+ const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
return errnoWrap(isize_result);
}
pub fn munmap(address: usize, length: usize) usize {
- return errnoWrap(c.munmap(@intToPtr(&c_void, address), length));
+ return errnoWrap(c.munmap(@intToPtr(*c_void, address), length));
}
-pub fn unlink(path: &const u8) usize {
+pub fn unlink(path: *const u8) usize {
return errnoWrap(c.unlink(path));
}
-pub fn getcwd(buf: &u8, size: usize) usize {
+pub fn getcwd(buf: *u8, size: usize) usize {
return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
-pub fn waitpid(pid: i32, status: &i32, options: u32) usize {
+pub fn waitpid(pid: i32, status: *i32, options: u32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
- return errnoWrap(c.waitpid(pid, @ptrCast(&c_int, status), @bitCast(c_int, options)));
+ return errnoWrap(c.waitpid(pid, @ptrCast(*c_int, status), @bitCast(c_int, options)));
}
pub fn fork() usize {
return errnoWrap(c.fork());
}
-pub fn access(path: &const u8, mode: u32) usize {
+pub fn access(path: *const u8, mode: u32) usize {
return errnoWrap(c.access(path, mode));
}
-pub fn pipe(fds: &[2]i32) usize {
+pub fn pipe(fds: *[2]i32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
- return errnoWrap(c.pipe(@ptrCast(&c_int, fds)));
+ return errnoWrap(c.pipe(@ptrCast(*c_int, fds)));
}
-pub fn getdirentries64(fd: i32, buf_ptr: &u8, buf_len: usize, basep: &i64) usize {
+pub fn getdirentries64(fd: i32, buf_ptr: *u8, buf_len: usize, basep: *i64) usize {
return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep)));
}
-pub fn mkdir(path: &const u8, mode: u32) usize {
+pub fn mkdir(path: *const u8, mode: u32) usize {
return errnoWrap(c.mkdir(path, mode));
}
-pub fn symlink(existing: &const u8, new: &const u8) usize {
+pub fn symlink(existing: *const u8, new: *const u8) usize {
return errnoWrap(c.symlink(existing, new));
}
-pub fn rename(old: &const u8, new: &const u8) usize {
+pub fn rename(old: *const u8, new: *const u8) usize {
return errnoWrap(c.rename(old, new));
}
-pub fn rmdir(path: &const u8) usize {
+pub fn rmdir(path: *const u8) usize {
return errnoWrap(c.rmdir(path));
}
-pub fn chdir(path: &const u8) usize {
+pub fn chdir(path: *const u8) usize {
return errnoWrap(c.chdir(path));
}
-pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
+pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize {
return errnoWrap(c.execve(path, argv, envp));
}
@@ -405,19 +405,19 @@ pub fn dup2(old: i32, new: i32) usize {
return errnoWrap(c.dup2(old, new));
}
-pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
+pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize {
return errnoWrap(c.readlink(path, buf_ptr, buf_len));
}
-pub fn gettimeofday(tv: ?&timeval, tz: ?&timezone) usize {
+pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize {
return errnoWrap(c.gettimeofday(tv, tz));
}
-pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
+pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return errnoWrap(c.nanosleep(req, rem));
}
-pub fn realpath(noalias filename: &const u8, noalias resolved_name: &u8) usize {
+pub fn realpath(noalias filename: *const u8, noalias resolved_name: *u8) usize {
return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
@@ -429,11 +429,11 @@ pub fn setregid(rgid: u32, egid: u32) usize {
return errnoWrap(c.setregid(rgid, egid));
}
-pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
+pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return errnoWrap(c.sigprocmask(@bitCast(c_int, flags), set, oldset));
}
-pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
+pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
var cact = c.Sigaction{
@@ -442,7 +442,7 @@ pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.sa_mask = act.mask,
};
var coact: c.Sigaction = undefined;
- const result = errnoWrap(c.sigaction(sig, &cact, &coact));
+ const result = errnoWrap(c.sigaction(sig, &cact, &coact));
if (result != 0) {
return result;
}
@@ -473,7 +473,7 @@ pub const Sigaction = struct {
flags: u32,
};
-pub fn sigaddset(set: &sigset_t, signo: u5) void {
+pub fn sigaddset(set: *sigset_t, signo: u5) void {
set.* |= u32(1) << (signo - 1);
}
diff --git a/std/os/file.zig b/std/os/file.zig
index c07e2c5c8b..d943da30ca 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -19,7 +19,7 @@ pub const File = struct {
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openRead(allocator: &mem.Allocator, path: []const u8) OpenError!File {
+ pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_RDONLY;
const fd = try os.posixOpen(allocator, path, flags, 0);
@@ -40,7 +40,7 @@ pub const File = struct {
}
/// Calls `openWriteMode` with os.default_file_mode for the mode.
- pub fn openWrite(allocator: &mem.Allocator, path: []const u8) OpenError!File {
+ pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File {
return openWriteMode(allocator, path, os.default_file_mode);
}
@@ -48,7 +48,7 @@ pub const File = struct {
/// If a file already exists in the destination it will be truncated.
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteMode(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@@ -72,7 +72,7 @@ pub const File = struct {
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteNoClobber(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@@ -96,7 +96,7 @@ pub const File = struct {
return File{ .handle = handle };
}
- pub fn access(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
+ pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
@@ -140,17 +140,17 @@ pub const File = struct {
/// Upon success, the stream is in an uninitialized state. To continue using it,
/// you must use the open() function.
- pub fn close(self: &File) void {
+ pub fn close(self: *File) void {
os.close(self.handle);
self.handle = undefined;
}
/// Calls `os.isTty` on `self.handle`.
- pub fn isTty(self: &File) bool {
+ pub fn isTty(self: *File) bool {
return os.isTty(self.handle);
}
- pub fn seekForward(self: &File, amount: isize) !void {
+ pub fn seekForward(self: *File, amount: isize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, amount, posix.SEEK_CUR);
@@ -179,7 +179,7 @@ pub const File = struct {
}
}
- pub fn seekTo(self: &File, pos: usize) !void {
+ pub fn seekTo(self: *File, pos: usize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const ipos = try math.cast(isize, pos);
@@ -210,7 +210,7 @@ pub const File = struct {
}
}
- pub fn getPos(self: &File) !usize {
+ pub fn getPos(self: *File) !usize {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, 0, posix.SEEK_CUR);
@@ -229,7 +229,7 @@ pub const File = struct {
},
Os.windows => {
var pos: windows.LARGE_INTEGER = undefined;
- if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
+ if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_PARAMETER => error.BadFd,
@@ -250,7 +250,7 @@ pub const File = struct {
}
}
- pub fn getEndPos(self: &File) !usize {
+ pub fn getEndPos(self: *File) !usize {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@@ -285,7 +285,7 @@ pub const File = struct {
Unexpected,
};
- fn mode(self: &File) ModeError!os.FileMode {
+ fn mode(self: *File) ModeError!os.FileMode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@@ -309,7 +309,7 @@ pub const File = struct {
pub const ReadError = error{};
- pub fn read(self: &File, buffer: []u8) !usize {
+ pub fn read(self: *File, buffer: []u8) !usize {
if (is_posix) {
var index: usize = 0;
while (index < buffer.len) {
@@ -334,7 +334,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
- if (windows.ReadFile(self.handle, @ptrCast(&c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
+ if (windows.ReadFile(self.handle, @ptrCast(*c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
@@ -353,7 +353,7 @@ pub const File = struct {
pub const WriteError = os.WindowsWriteError || os.PosixWriteError;
- fn write(self: &File, bytes: []const u8) WriteError!void {
+ fn write(self: *File, bytes: []const u8) WriteError!void {
if (is_posix) {
try os.posixWrite(self.handle, bytes);
} else if (is_windows) {
diff --git a/std/os/get_user_id.zig b/std/os/get_user_id.zig
index 2a15e1d495..c0c1b1cc4b 100644
--- a/std/os/get_user_id.zig
+++ b/std/os/get_user_id.zig
@@ -77,8 +77,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
+ if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
+ if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
},
},
State.ReadGroupId => switch (byte) {
@@ -93,8 +93,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
+ if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
+ if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
},
},
}
diff --git a/std/os/index.zig b/std/os/index.zig
index 70e654bcd9..ff638c670b 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -321,14 +321,14 @@ pub const PosixOpenError = error{
/// ::file_path needs to be copied in memory to add a null terminating byte.
/// Calls POSIX open, keeps trying if it gets interrupted, and translates
/// the return value into zig errors.
-pub fn posixOpen(allocator: &Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
+pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
const path_with_null = try cstr.addNullByte(allocator, file_path);
defer allocator.free(path_with_null);
return posixOpenC(path_with_null.ptr, flags, perm);
}
-pub fn posixOpenC(file_path: &const u8, flags: u32, perm: usize) !i32 {
+pub fn posixOpenC(file_path: *const u8, flags: u32, perm: usize) !i32 {
while (true) {
const result = posix.open(file_path, flags, perm);
const err = posix.getErrno(result);
@@ -374,10 +374,10 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void {
}
}
-pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap) ![]?&u8 {
+pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?*u8 {
const envp_count = env_map.count();
- const envp_buf = try allocator.alloc(?&u8, envp_count + 1);
- mem.set(?&u8, envp_buf, null);
+ const envp_buf = try allocator.alloc(?*u8, envp_count + 1);
+ mem.set(?*u8, envp_buf, null);
errdefer freeNullDelimitedEnvMap(allocator, envp_buf);
{
var it = env_map.iterator();
@@ -397,7 +397,7 @@ pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap)
return envp_buf;
}
-pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
+pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?*u8) void {
for (envp_buf) |env| {
const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break;
allocator.free(env_buf);
@@ -410,9 +410,9 @@ pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
/// pointers after the args and after the environment variables.
/// `argv[0]` is the executable path.
/// This function also uses the PATH environment variable to get the full path to the executable.
-pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap, allocator: &Allocator) !void {
- const argv_buf = try allocator.alloc(?&u8, argv.len + 1);
- mem.set(?&u8, argv_buf, null);
+pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void {
+ const argv_buf = try allocator.alloc(?*u8, argv.len + 1);
+ mem.set(?*u8, argv_buf, null);
defer {
for (argv_buf) |arg| {
const arg_buf = if (arg) |ptr| cstr.toSlice(ptr) else break;
@@ -494,10 +494,10 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError {
}
pub var linux_aux_raw = []usize{0} ** 38;
-pub var posix_environ_raw: []&u8 = undefined;
+pub var posix_environ_raw: []*u8 = undefined;
/// Caller must free result when done.
-pub fn getEnvMap(allocator: &Allocator) !BufMap {
+pub fn getEnvMap(allocator: *Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
@@ -557,7 +557,7 @@ pub fn getEnvPosix(key: []const u8) ?[]const u8 {
}
/// Caller must free returned memory.
-pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
+pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 {
if (is_windows) {
const key_with_null = try cstr.addNullByte(allocator, key);
defer allocator.free(key_with_null);
@@ -591,7 +591,7 @@ pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
}
/// Caller must free the returned memory.
-pub fn getCwd(allocator: &Allocator) ![]u8 {
+pub fn getCwd(allocator: *Allocator) ![]u8 {
switch (builtin.os) {
Os.windows => {
var buf = try allocator.alloc(u8, 256);
@@ -640,7 +640,7 @@ test "os.getCwd" {
pub const SymLinkError = PosixSymLinkError || WindowsSymLinkError;
-pub fn symLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
+pub fn symLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
if (is_windows) {
return symLinkWindows(allocator, existing_path, new_path);
} else {
@@ -653,7 +653,7 @@ pub const WindowsSymLinkError = error{
Unexpected,
};
-pub fn symLinkWindows(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
+pub fn symLinkWindows(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
const existing_with_null = try cstr.addNullByte(allocator, existing_path);
defer allocator.free(existing_with_null);
const new_with_null = try cstr.addNullByte(allocator, new_path);
@@ -683,7 +683,7 @@ pub const PosixSymLinkError = error{
Unexpected,
};
-pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
+pub fn symLinkPosix(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
const full_buf = try allocator.alloc(u8, existing_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@@ -718,7 +718,7 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path:
// here we replace the standard +/ with -_ so that it can be used in a file name
const b64_fs_encoder = base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char);
-pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(allocator, existing_path, new_path)) {
return;
} else |err| switch (err) {
@@ -746,7 +746,7 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path:
}
}
-pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFile(allocator: *Allocator, file_path: []const u8) !void {
if (builtin.os == Os.windows) {
return deleteFileWindows(allocator, file_path);
} else {
@@ -754,7 +754,7 @@ pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
}
}
-pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFileWindows(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@@ -772,7 +772,7 @@ pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
}
}
-pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFilePosix(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@@ -803,7 +803,7 @@ pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
/// Destination file will have the same mode as the source file.
-pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []const u8) !void {
+pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []const u8) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@@ -825,7 +825,7 @@ pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []con
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
-pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
+pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@@ -843,7 +843,7 @@ pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: [
}
pub const AtomicFile = struct {
- allocator: &Allocator,
+ allocator: *Allocator,
file: os.File,
tmp_path: []u8,
dest_path: []const u8,
@@ -851,7 +851,7 @@ pub const AtomicFile = struct {
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
- pub fn init(allocator: &Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
+ pub fn init(allocator: *Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
const dirname = os.path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
@@ -888,7 +888,7 @@ pub const AtomicFile = struct {
}
/// always call deinit, even after successful finish()
- pub fn deinit(self: &AtomicFile) void {
+ pub fn deinit(self: *AtomicFile) void {
if (!self.finished) {
self.file.close();
deleteFile(self.allocator, self.tmp_path) catch {};
@@ -897,7 +897,7 @@ pub const AtomicFile = struct {
}
}
- pub fn finish(self: &AtomicFile) !void {
+ pub fn finish(self: *AtomicFile) !void {
assert(!self.finished);
self.file.close();
try rename(self.allocator, self.tmp_path, self.dest_path);
@@ -906,7 +906,7 @@ pub const AtomicFile = struct {
}
};
-pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8) !void {
+pub fn rename(allocator: *Allocator, old_path: []const u8, new_path: []const u8) !void {
const full_buf = try allocator.alloc(u8, old_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@@ -951,7 +951,7 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8)
}
}
-pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDir(allocator: *Allocator, dir_path: []const u8) !void {
if (is_windows) {
return makeDirWindows(allocator, dir_path);
} else {
@@ -959,7 +959,7 @@ pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@@ -973,7 +973,7 @@ pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDirPosix(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@@ -999,7 +999,7 @@ pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
-pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
+pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, full_path);
defer allocator.free(resolved_path);
@@ -1033,7 +1033,7 @@ pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
/// Returns ::error.DirNotEmpty if the directory is not empty.
/// To delete a directory recursively, see ::deleteTree
-pub fn deleteDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@@ -1084,7 +1084,7 @@ const DeleteTreeError = error{
DirNotEmpty,
Unexpected,
};
-pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!void {
+pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
@@ -1153,7 +1153,7 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
pub const Dir = struct {
fd: i32,
darwin_seek: darwin_seek_t,
- allocator: &Allocator,
+ allocator: *Allocator,
buf: []u8,
index: usize,
end_index: usize,
@@ -1180,7 +1180,7 @@ pub const Dir = struct {
};
};
- pub fn open(allocator: &Allocator, dir_path: []const u8) !Dir {
+ pub fn open(allocator: *Allocator, dir_path: []const u8) !Dir {
const fd = switch (builtin.os) {
Os.windows => @compileError("TODO support Dir.open for windows"),
Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0),
@@ -1206,14 +1206,14 @@ pub const Dir = struct {
};
}
- pub fn close(self: &Dir) void {
+ pub fn close(self: *Dir) void {
self.allocator.free(self.buf);
os.close(self.fd);
}
/// Memory such as file names referenced in this returned entry becomes invalid
/// with subsequent calls to next, as well as when this ::Dir is deinitialized.
- pub fn next(self: &Dir) !?Entry {
+ pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
Os.linux => return self.nextLinux(),
Os.macosx, Os.ios => return self.nextDarwin(),
@@ -1222,7 +1222,7 @@ pub const Dir = struct {
}
}
- fn nextDarwin(self: &Dir) !?Entry {
+ fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
if (self.buf.len == 0) {
@@ -1248,7 +1248,7 @@ pub const Dir = struct {
break;
}
}
- const darwin_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]);
+ const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
const next_index = self.index + darwin_entry.d_reclen;
self.index = next_index;
@@ -1277,11 +1277,11 @@ pub const Dir = struct {
}
}
- fn nextWindows(self: &Dir) !?Entry {
+ fn nextWindows(self: *Dir) !?Entry {
@compileError("TODO support Dir.next for windows");
}
- fn nextLinux(self: &Dir) !?Entry {
+ fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
if (self.buf.len == 0) {
@@ -1307,7 +1307,7 @@ pub const Dir = struct {
break;
}
}
- const linux_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]);
+ const linux_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
const next_index = self.index + linux_entry.d_reclen;
self.index = next_index;
@@ -1337,7 +1337,7 @@ pub const Dir = struct {
}
};
-pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn changeCurDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@@ -1361,7 +1361,7 @@ pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
}
/// Read value of a symbolic link.
-pub fn readLink(allocator: &Allocator, pathname: []const u8) ![]u8 {
+pub fn readLink(allocator: *Allocator, pathname: []const u8) ![]u8 {
const path_buf = try allocator.alloc(u8, pathname.len + 1);
defer allocator.free(path_buf);
@@ -1468,7 +1468,7 @@ pub const ArgIteratorPosix = struct {
};
}
- pub fn next(self: &ArgIteratorPosix) ?[]const u8 {
+ pub fn next(self: *ArgIteratorPosix) ?[]const u8 {
if (self.index == self.count) return null;
const s = raw[self.index];
@@ -1476,7 +1476,7 @@ pub const ArgIteratorPosix = struct {
return cstr.toSlice(s);
}
- pub fn skip(self: &ArgIteratorPosix) bool {
+ pub fn skip(self: *ArgIteratorPosix) bool {
if (self.index == self.count) return false;
self.index += 1;
@@ -1485,12 +1485,12 @@ pub const ArgIteratorPosix = struct {
/// This is marked as public but actually it's only meant to be used
/// internally by zig's startup code.
- pub var raw: []&u8 = undefined;
+ pub var raw: []*u8 = undefined;
};
pub const ArgIteratorWindows = struct {
index: usize,
- cmd_line: &const u8,
+ cmd_line: *const u8,
in_quote: bool,
quote_count: usize,
seen_quote_count: usize,
@@ -1501,7 +1501,7 @@ pub const ArgIteratorWindows = struct {
return initWithCmdLine(windows.GetCommandLineA());
}
- pub fn initWithCmdLine(cmd_line: &const u8) ArgIteratorWindows {
+ pub fn initWithCmdLine(cmd_line: *const u8) ArgIteratorWindows {
return ArgIteratorWindows{
.index = 0,
.cmd_line = cmd_line,
@@ -1512,7 +1512,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
- pub fn next(self: &ArgIteratorWindows, allocator: &Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -1526,7 +1526,7 @@ pub const ArgIteratorWindows = struct {
return self.internalNext(allocator);
}
- pub fn skip(self: &ArgIteratorWindows) bool {
+ pub fn skip(self: *ArgIteratorWindows) bool {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -1565,7 +1565,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn internalNext(self: &ArgIteratorWindows, allocator: &Allocator) NextError![]u8 {
+ fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -1609,14 +1609,14 @@ pub const ArgIteratorWindows = struct {
}
}
- fn emitBackslashes(self: &ArgIteratorWindows, buf: &Buffer, emit_count: usize) !void {
+ fn emitBackslashes(self: *ArgIteratorWindows, buf: *Buffer, emit_count: usize) !void {
var i: usize = 0;
while (i < emit_count) : (i += 1) {
try buf.appendByte('\\');
}
}
- fn countQuotes(cmd_line: &const u8) usize {
+ fn countQuotes(cmd_line: *const u8) usize {
var result: usize = 0;
var backslash_count: usize = 0;
var index: usize = 0;
@@ -1649,7 +1649,7 @@ pub const ArgIterator = struct {
pub const NextError = ArgIteratorWindows.NextError;
/// You must free the returned memory when done.
- pub fn next(self: &ArgIterator, allocator: &Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
@@ -1658,13 +1658,13 @@ pub const ArgIterator = struct {
}
/// If you only are targeting posix you can call this and not need an allocator.
- pub fn nextPosix(self: &ArgIterator) ?[]const u8 {
+ pub fn nextPosix(self: *ArgIterator) ?[]const u8 {
return self.inner.next();
}
/// Parse past 1 argument without capturing it.
/// Returns `true` if skipped an arg, `false` if we are at the end.
- pub fn skip(self: &ArgIterator) bool {
+ pub fn skip(self: *ArgIterator) bool {
return self.inner.skip();
}
};
@@ -1674,7 +1674,7 @@ pub fn args() ArgIterator {
}
/// Caller must call freeArgs on result.
-pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
+pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
// TODO refactor to only make 1 allocation.
var it = args();
var contents = try Buffer.initSize(allocator, 0);
@@ -1711,12 +1711,12 @@ pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
return result_slice_list;
}
-pub fn argsFree(allocator: &mem.Allocator, args_alloc: []const []u8) void {
+pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void {
var total_bytes: usize = 0;
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len;
}
- const unaligned_allocated_buf = @ptrCast(&const u8, args_alloc.ptr)[0..total_bytes];
+ const unaligned_allocated_buf = @ptrCast(*const u8, args_alloc.ptr)[0..total_bytes];
const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
return allocator.free(aligned_allocated_buf);
}
@@ -1765,7 +1765,7 @@ test "windows arg parsing" {
});
}
-fn testWindowsCmdLine(input_cmd_line: &const u8, expected_args: []const []const u8) void {
+fn testWindowsCmdLine(input_cmd_line: *const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
const arg = ??it.next(debug.global_allocator) catch unreachable;
@@ -1832,7 +1832,7 @@ test "openSelfExe" {
/// This function may return an error if the current executable
/// was deleted after spawning.
/// Caller owns returned memory.
-pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
+pub fn selfExePath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@@ -1875,7 +1875,7 @@ pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
/// Get the directory path that contains the current executable.
/// Caller owns returned memory.
-pub fn selfExeDirPath(allocator: &mem.Allocator) ![]u8 {
+pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@@ -2001,7 +2001,7 @@ pub const PosixBindError = error{
};
/// addr is `&const T` where T is one of the sockaddr
-pub fn posixBind(fd: i32, addr: &const posix.sockaddr) PosixBindError!void {
+pub fn posixBind(fd: i32, addr: *const posix.sockaddr) PosixBindError!void {
const rc = posix.bind(fd, addr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
switch (err) {
@@ -2096,7 +2096,7 @@ pub const PosixAcceptError = error{
Unexpected,
};
-pub fn posixAccept(fd: i32, addr: &posix.sockaddr, flags: u32) PosixAcceptError!i32 {
+pub fn posixAccept(fd: i32, addr: *posix.sockaddr, flags: u32) PosixAcceptError!i32 {
while (true) {
var sockaddr_size = u32(@sizeOf(posix.sockaddr));
const rc = posix.accept4(fd, addr, &sockaddr_size, flags);
@@ -2195,7 +2195,7 @@ pub const LinuxEpollCtlError = error{
Unexpected,
};
-pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: &linux.epoll_event) LinuxEpollCtlError!void {
+pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: *linux.epoll_event) LinuxEpollCtlError!void {
const rc = posix.epoll_ctl(epfd, op, fd, event);
const err = posix.getErrno(rc);
switch (err) {
@@ -2288,7 +2288,7 @@ pub const PosixConnectError = error{
Unexpected,
};
-pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
+pub fn posixConnect(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@@ -2319,7 +2319,7 @@ pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectEr
/// Same as posixConnect except it is for blocking socket file descriptors.
/// It expects to receive EINPROGRESS.
-pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
+pub fn posixConnectAsync(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@@ -2350,7 +2350,7 @@ pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConn
pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void {
var err_code: i32 = undefined;
var size: u32 = @sizeOf(i32);
- const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(&u8, &err_code), &size);
+ const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(*u8, &err_code), &size);
assert(size == 4);
const err = posix.getErrno(rc);
switch (err) {
@@ -2401,13 +2401,13 @@ pub const Thread = struct {
},
builtin.Os.windows => struct {
handle: windows.HANDLE,
- alloc_start: &c_void,
+ alloc_start: *c_void,
heap_handle: windows.HANDLE,
},
else => @compileError("Unsupported OS"),
};
- pub fn wait(self: &const Thread) void {
+ pub fn wait(self: *const Thread) void {
if (use_pthreads) {
const err = c.pthread_join(self.data.handle, null);
switch (err) {
@@ -2473,7 +2473,7 @@ pub const SpawnThreadError = error{
/// fn startFn(@typeOf(context)) T
/// where T is u8, noreturn, void, or !void
/// caller must call wait on the returned thread
-pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread {
+pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread {
// TODO compile-time call graph analysis to determine stack upper bound
// https://github.com/ziglang/zig/issues/157
const default_stack_size = 8 * 1024 * 1024;
@@ -2491,7 +2491,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
if (@sizeOf(Context) == 0) {
return startFn({});
} else {
- return startFn(@ptrCast(&Context, @alignCast(@alignOf(Context), arg)).*);
+ return startFn(@ptrCast(*Context, @alignCast(@alignOf(Context), arg)).*);
}
}
};
@@ -2500,13 +2500,13 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
- const bytes = @ptrCast(&u8, bytes_ptr)[0..byte_count];
+ const bytes = @ptrCast(*u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
outer_context.inner = context;
outer_context.thread.data.heap_handle = heap_handle;
outer_context.thread.data.alloc_start = bytes_ptr;
- const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(&c_void, &outer_context.inner);
+ const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? {
const err = windows.GetLastError();
return switch (err) {
@@ -2521,15 +2521,15 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
if (@sizeOf(Context) == 0) {
return startFn({});
} else {
- return startFn(@intToPtr(&const Context, ctx_addr).*);
+ return startFn(@intToPtr(*const Context, ctx_addr).*);
}
}
- extern fn posixThreadMain(ctx: ?&c_void) ?&c_void {
+ extern fn posixThreadMain(ctx: ?*c_void) ?*c_void {
if (@sizeOf(Context) == 0) {
_ = startFn({});
return null;
} else {
- _ = startFn(@ptrCast(&const Context, @alignCast(@alignOf(Context), ctx)).*);
+ _ = startFn(@ptrCast(*const Context, @alignCast(@alignOf(Context), ctx)).*);
return null;
}
}
@@ -2548,7 +2548,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
stack_end -= @sizeOf(Context);
stack_end -= stack_end % @alignOf(Context);
assert(stack_end >= stack_addr);
- const context_ptr = @alignCast(@alignOf(Context), @intToPtr(&Context, stack_end));
+ const context_ptr = @alignCast(@alignOf(Context), @intToPtr(*Context, stack_end));
context_ptr.* = context;
arg = stack_end;
}
@@ -2556,7 +2556,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
stack_end -= @sizeOf(Thread);
stack_end -= stack_end % @alignOf(Thread);
assert(stack_end >= stack_addr);
- const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(&Thread, stack_end));
+ const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, stack_end));
thread_ptr.data.stack_addr = stack_addr;
thread_ptr.data.stack_len = mmap_len;
@@ -2572,9 +2572,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
// align to page
stack_end -= stack_end % os.page_size;
- assert(c.pthread_attr_setstack(&attr, @intToPtr(&c_void, stack_addr), stack_end - stack_addr) == 0);
+ assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0);
- const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(&c_void, arg));
+ const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg));
switch (err) {
0 => return thread_ptr,
posix.EAGAIN => return SpawnThreadError.SystemResources,
diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig
index 5186ff32d3..3e7b836ac7 100644
--- a/std/os/linux/index.zig
+++ b/std/os/linux/index.zig
@@ -665,15 +665,15 @@ pub fn dup2(old: i32, new: i32) usize {
return syscall2(SYS_dup2, usize(old), usize(new));
}
-pub fn chdir(path: &const u8) usize {
+pub fn chdir(path: *const u8) usize {
return syscall1(SYS_chdir, @ptrToInt(path));
}
-pub fn chroot(path: &const u8) usize {
+pub fn chroot(path: *const u8) usize {
return syscall1(SYS_chroot, @ptrToInt(path));
}
-pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
+pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize {
return syscall3(SYS_execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp));
}
@@ -681,15 +681,15 @@ pub fn fork() usize {
return syscall0(SYS_fork);
}
-pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?&timespec) usize {
+pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) usize {
return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
}
-pub fn getcwd(buf: &u8, size: usize) usize {
+pub fn getcwd(buf: *u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size);
}
-pub fn getdents(fd: i32, dirp: &u8, count: usize) usize {
+pub fn getdents(fd: i32, dirp: *u8, count: usize) usize {
return syscall3(SYS_getdents, usize(fd), @ptrToInt(dirp), count);
}
@@ -698,27 +698,27 @@ pub fn isatty(fd: i32) bool {
return syscall3(SYS_ioctl, usize(fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
}
-pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
+pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize {
return syscall3(SYS_readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
}
-pub fn mkdir(path: &const u8, mode: u32) usize {
+pub fn mkdir(path: *const u8, mode: u32) usize {
return syscall2(SYS_mkdir, @ptrToInt(path), mode);
}
-pub fn mount(special: &const u8, dir: &const u8, fstype: &const u8, flags: usize, data: usize) usize {
+pub fn mount(special: *const u8, dir: *const u8, fstype: *const u8, flags: usize, data: usize) usize {
return syscall5(SYS_mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data);
}
-pub fn umount(special: &const u8) usize {
+pub fn umount(special: *const u8) usize {
return syscall2(SYS_umount2, @ptrToInt(special), 0);
}
-pub fn umount2(special: &const u8, flags: u32) usize {
+pub fn umount2(special: *const u8, flags: u32) usize {
return syscall2(SYS_umount2, @ptrToInt(special), flags);
}
-pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
+pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, usize(fd), @bitCast(usize, offset));
}
@@ -726,60 +726,60 @@ pub fn munmap(address: usize, length: usize) usize {
return syscall2(SYS_munmap, address, length);
}
-pub fn read(fd: i32, buf: &u8, count: usize) usize {
+pub fn read(fd: i32, buf: *u8, count: usize) usize {
return syscall3(SYS_read, usize(fd), @ptrToInt(buf), count);
}
-pub fn rmdir(path: &const u8) usize {
+pub fn rmdir(path: *const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path));
}
-pub fn symlink(existing: &const u8, new: &const u8) usize {
+pub fn symlink(existing: *const u8, new: *const u8) usize {
return syscall2(SYS_symlink, @ptrToInt(existing), @ptrToInt(new));
}
-pub fn pread(fd: i32, buf: &u8, count: usize, offset: usize) usize {
+pub fn pread(fd: i32, buf: *u8, count: usize, offset: usize) usize {
return syscall4(SYS_pread, usize(fd), @ptrToInt(buf), count, offset);
}
-pub fn access(path: &const u8, mode: u32) usize {
+pub fn access(path: *const u8, mode: u32) usize {
return syscall2(SYS_access, @ptrToInt(path), mode);
}
-pub fn pipe(fd: &[2]i32) usize {
+pub fn pipe(fd: *[2]i32) usize {
return pipe2(fd, 0);
}
-pub fn pipe2(fd: &[2]i32, flags: usize) usize {
+pub fn pipe2(fd: *[2]i32, flags: usize) usize {
return syscall2(SYS_pipe2, @ptrToInt(fd), flags);
}
-pub fn write(fd: i32, buf: &const u8, count: usize) usize {
+pub fn write(fd: i32, buf: *const u8, count: usize) usize {
return syscall3(SYS_write, usize(fd), @ptrToInt(buf), count);
}
-pub fn pwrite(fd: i32, buf: &const u8, count: usize, offset: usize) usize {
+pub fn pwrite(fd: i32, buf: *const u8, count: usize, offset: usize) usize {
return syscall4(SYS_pwrite, usize(fd), @ptrToInt(buf), count, offset);
}
-pub fn rename(old: &const u8, new: &const u8) usize {
+pub fn rename(old: *const u8, new: *const u8) usize {
return syscall2(SYS_rename, @ptrToInt(old), @ptrToInt(new));
}
-pub fn open(path: &const u8, flags: u32, perm: usize) usize {
+pub fn open(path: *const u8, flags: u32, perm: usize) usize {
return syscall3(SYS_open, @ptrToInt(path), flags, perm);
}
-pub fn create(path: &const u8, perm: usize) usize {
+pub fn create(path: *const u8, perm: usize) usize {
return syscall2(SYS_creat, @ptrToInt(path), perm);
}
-pub fn openat(dirfd: i32, path: &const u8, flags: usize, mode: usize) usize {
+pub fn openat(dirfd: i32, path: *const u8, flags: usize, mode: usize) usize {
return syscall4(SYS_openat, usize(dirfd), @ptrToInt(path), flags, mode);
}
/// See also `clone` (from the arch-specific include)
-pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: &i32, child_tid: &i32, newtls: usize) usize {
+pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: *i32, child_tid: *i32, newtls: usize) usize {
return syscall5(SYS_clone, flags, child_stack_ptr, @ptrToInt(parent_tid), @ptrToInt(child_tid), newtls);
}
@@ -801,7 +801,7 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn getrandom(buf: &u8, count: usize, flags: u32) usize {
+pub fn getrandom(buf: *u8, count: usize, flags: u32) usize {
return syscall3(SYS_getrandom, @ptrToInt(buf), count, usize(flags));
}
@@ -809,15 +809,15 @@ pub fn kill(pid: i32, sig: i32) usize {
return syscall2(SYS_kill, @bitCast(usize, isize(pid)), usize(sig));
}
-pub fn unlink(path: &const u8) usize {
+pub fn unlink(path: *const u8) usize {
return syscall1(SYS_unlink, @ptrToInt(path));
}
-pub fn waitpid(pid: i32, status: &i32, options: i32) usize {
+pub fn waitpid(pid: i32, status: *i32, options: i32) usize {
return syscall4(SYS_wait4, @bitCast(usize, isize(pid)), @ptrToInt(status), @bitCast(usize, isize(options)), 0);
}
-pub fn clock_gettime(clk_id: i32, tp: &timespec) usize {
+pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
if (VDSO_CGT_SYM.len != 0) {
const f = @atomicLoad(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, builtin.AtomicOrder.Unordered);
if (@ptrToInt(f) != 0) {
@@ -831,7 +831,7 @@ pub fn clock_gettime(clk_id: i32, tp: &timespec) usize {
return syscall2(SYS_clock_gettime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
var vdso_clock_gettime = init_vdso_clock_gettime;
-extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize {
+extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
const addr = vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM);
var f = @intToPtr(@typeOf(init_vdso_clock_gettime), addr);
_ = @cmpxchgStrong(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, init_vdso_clock_gettime, f, builtin.AtomicOrder.Monotonic, builtin.AtomicOrder.Monotonic);
@@ -839,23 +839,23 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize {
return f(clk, ts);
}
-pub fn clock_getres(clk_id: i32, tp: &timespec) usize {
+pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
return syscall2(SYS_clock_getres, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
-pub fn clock_settime(clk_id: i32, tp: &const timespec) usize {
+pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
return syscall2(SYS_clock_settime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
-pub fn gettimeofday(tv: &timeval, tz: &timezone) usize {
+pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
return syscall2(SYS_gettimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
-pub fn settimeofday(tv: &const timeval, tz: &const timezone) usize {
+pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
return syscall2(SYS_settimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
-pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
+pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(SYS_nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
@@ -899,11 +899,11 @@ pub fn setegid(egid: u32) usize {
return syscall1(SYS_setegid, egid);
}
-pub fn getresuid(ruid: &u32, euid: &u32, suid: &u32) usize {
+pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
return syscall3(SYS_getresuid, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
}
-pub fn getresgid(rgid: &u32, egid: &u32, sgid: &u32) usize {
+pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
return syscall3(SYS_getresgid, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
}
@@ -915,11 +915,11 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
return syscall3(SYS_setresgid, rgid, egid, sgid);
}
-pub fn getgroups(size: usize, list: &u32) usize {
+pub fn getgroups(size: usize, list: *u32) usize {
return syscall2(SYS_getgroups, size, @ptrToInt(list));
}
-pub fn setgroups(size: usize, list: &const u32) usize {
+pub fn setgroups(size: usize, list: *const u32) usize {
return syscall2(SYS_setgroups, size, @ptrToInt(list));
}
@@ -927,11 +927,11 @@ pub fn getpid() i32 {
return @bitCast(i32, u32(syscall0(SYS_getpid)));
}
-pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
+pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
}
-pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
+pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig >= 1);
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
@@ -942,8 +942,8 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.restorer = @ptrCast(extern fn () void, restore_rt),
};
var ksa_old: k_sigaction = undefined;
- @memcpy(@ptrCast(&u8, &ksa.mask), @ptrCast(&const u8, &act.mask), 8);
- const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
+ @memcpy(@ptrCast(*u8, &ksa.mask), @ptrCast(*const u8, &act.mask), 8);
+ const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
const err = getErrno(result);
if (err != 0) {
return result;
@@ -951,7 +951,7 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
if (oact) |old| {
old.handler = ksa_old.handler;
old.flags = @truncate(u32, ksa_old.flags);
- @memcpy(@ptrCast(&u8, &old.mask), @ptrCast(&const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
+ @memcpy(@ptrCast(*u8, &old.mask), @ptrCast(*const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
}
return 0;
}
@@ -989,24 +989,24 @@ pub fn raise(sig: i32) usize {
return ret;
}
-fn blockAllSignals(set: &sigset_t) void {
+fn blockAllSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&all_mask), @ptrToInt(set), NSIG / 8);
}
-fn blockAppSignals(set: &sigset_t) void {
+fn blockAppSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&app_mask), @ptrToInt(set), NSIG / 8);
}
-fn restoreSignals(set: &sigset_t) void {
+fn restoreSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_SETMASK, @ptrToInt(set), 0, NSIG / 8);
}
-pub fn sigaddset(set: &sigset_t, sig: u6) void {
+pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
(set.*)[usize(s) / usize.bit_count] |= usize(1) << (s & (usize.bit_count - 1));
}
-pub fn sigismember(set: &const sigset_t, sig: u6) bool {
+pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
return ((set.*)[usize(s) / usize.bit_count] & (usize(1) << (s & (usize.bit_count - 1)))) != 0;
}
@@ -1036,15 +1036,15 @@ pub const sockaddr_in6 = extern struct {
};
pub const iovec = extern struct {
- iov_base: &u8,
+ iov_base: *u8,
iov_len: usize,
};
-pub fn getsockname(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getsockname, usize(fd), @ptrToInt(addr), @ptrToInt(len));
}
-pub fn getpeername(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getpeername, usize(fd), @ptrToInt(addr), @ptrToInt(len));
}
@@ -1052,27 +1052,27 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
return syscall3(SYS_socket, domain, socket_type, protocol);
}
-pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: &const u8, optlen: socklen_t) usize {
+pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: *const u8, optlen: socklen_t) usize {
return syscall5(SYS_setsockopt, usize(fd), level, optname, usize(optval), @ptrToInt(optlen));
}
-pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: &u8, noalias optlen: &socklen_t) usize {
+pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: *u8, noalias optlen: *socklen_t) usize {
return syscall5(SYS_getsockopt, usize(fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
-pub fn sendmsg(fd: i32, msg: &const msghdr, flags: u32) usize {
+pub fn sendmsg(fd: i32, msg: *const msghdr, flags: u32) usize {
return syscall3(SYS_sendmsg, usize(fd), @ptrToInt(msg), flags);
}
-pub fn connect(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
+pub fn connect(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return syscall3(SYS_connect, usize(fd), @ptrToInt(addr), usize(len));
}
-pub fn recvmsg(fd: i32, msg: &msghdr, flags: u32) usize {
+pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
return syscall3(SYS_recvmsg, usize(fd), @ptrToInt(msg), flags);
}
-pub fn recvfrom(fd: i32, noalias buf: &u8, len: usize, flags: u32, noalias addr: ?&sockaddr, noalias alen: ?&socklen_t) usize {
+pub fn recvfrom(fd: i32, noalias buf: *u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
return syscall6(SYS_recvfrom, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
@@ -1080,7 +1080,7 @@ pub fn shutdown(fd: i32, how: i32) usize {
return syscall2(SYS_shutdown, usize(fd), usize(how));
}
-pub fn bind(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
+pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return syscall3(SYS_bind, usize(fd), @ptrToInt(addr), usize(len));
}
@@ -1088,79 +1088,79 @@ pub fn listen(fd: i32, backlog: u32) usize {
return syscall2(SYS_listen, usize(fd), backlog);
}
-pub fn sendto(fd: i32, buf: &const u8, len: usize, flags: u32, addr: ?&const sockaddr, alen: socklen_t) usize {
+pub fn sendto(fd: i32, buf: *const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
return syscall6(SYS_sendto, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), usize(alen));
}
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: [2]i32) usize {
- return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
+ return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
}
-pub fn accept(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return accept4(fd, addr, len, 0);
}
-pub fn accept4(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t, flags: u32) usize {
+pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize {
return syscall4(SYS_accept4, usize(fd), @ptrToInt(addr), @ptrToInt(len), flags);
}
-pub fn fstat(fd: i32, stat_buf: &Stat) usize {
+pub fn fstat(fd: i32, stat_buf: *Stat) usize {
return syscall2(SYS_fstat, usize(fd), @ptrToInt(stat_buf));
}
-pub fn stat(pathname: &const u8, statbuf: &Stat) usize {
+pub fn stat(pathname: *const u8, statbuf: *Stat) usize {
return syscall2(SYS_stat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
-pub fn lstat(pathname: &const u8, statbuf: &Stat) usize {
+pub fn lstat(pathname: *const u8, statbuf: *Stat) usize {
return syscall2(SYS_lstat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
-pub fn listxattr(path: &const u8, list: &u8, size: usize) usize {
+pub fn listxattr(path: *const u8, list: *u8, size: usize) usize {
return syscall3(SYS_listxattr, @ptrToInt(path), @ptrToInt(list), size);
}
-pub fn llistxattr(path: &const u8, list: &u8, size: usize) usize {
+pub fn llistxattr(path: *const u8, list: *u8, size: usize) usize {
return syscall3(SYS_llistxattr, @ptrToInt(path), @ptrToInt(list), size);
}
-pub fn flistxattr(fd: usize, list: &u8, size: usize) usize {
+pub fn flistxattr(fd: usize, list: *u8, size: usize) usize {
return syscall3(SYS_flistxattr, fd, @ptrToInt(list), size);
}
-pub fn getxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
+pub fn getxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn lgetxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
+pub fn lgetxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn fgetxattr(fd: usize, name: &const u8, value: &void, size: usize) usize {
+pub fn fgetxattr(fd: usize, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_lgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn setxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize {
+pub fn setxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn lsetxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize {
+pub fn lsetxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn fsetxattr(fd: usize, name: &const u8, value: &const void, size: usize, flags: usize) usize {
+pub fn fsetxattr(fd: usize, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn removexattr(path: &const u8, name: &const u8) usize {
+pub fn removexattr(path: *const u8, name: *const u8) usize {
return syscall2(SYS_removexattr, @ptrToInt(path), @ptrToInt(name));
}
-pub fn lremovexattr(path: &const u8, name: &const u8) usize {
+pub fn lremovexattr(path: *const u8, name: *const u8) usize {
return syscall2(SYS_lremovexattr, @ptrToInt(path), @ptrToInt(name));
}
-pub fn fremovexattr(fd: usize, name: &const u8) usize {
+pub fn fremovexattr(fd: usize, name: *const u8) usize {
return syscall2(SYS_fremovexattr, fd, @ptrToInt(name));
}
@@ -1184,11 +1184,11 @@ pub fn epoll_create1(flags: usize) usize {
return syscall1(SYS_epoll_create1, flags);
}
-pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: &epoll_event) usize {
+pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: *epoll_event) usize {
return syscall4(SYS_epoll_ctl, usize(epoll_fd), usize(op), usize(fd), @ptrToInt(ev));
}
-pub fn epoll_wait(epoll_fd: i32, events: &epoll_event, maxevents: u32, timeout: i32) usize {
+pub fn epoll_wait(epoll_fd: i32, events: *epoll_event, maxevents: u32, timeout: i32) usize {
return syscall4(SYS_epoll_wait, usize(epoll_fd), @ptrToInt(events), usize(maxevents), usize(timeout));
}
@@ -1201,11 +1201,11 @@ pub const itimerspec = extern struct {
it_value: timespec,
};
-pub fn timerfd_gettime(fd: i32, curr_value: &itimerspec) usize {
+pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
return syscall2(SYS_timerfd_gettime, usize(fd), @ptrToInt(curr_value));
}
-pub fn timerfd_settime(fd: i32, flags: u32, new_value: &const itimerspec, old_value: ?&itimerspec) usize {
+pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
return syscall4(SYS_timerfd_settime, usize(fd), usize(flags), @ptrToInt(new_value), @ptrToInt(old_value));
}
@@ -1300,8 +1300,8 @@ pub fn CAP_TO_INDEX(cap: u8) u8 {
}
pub const cap_t = extern struct {
- hdrp: &cap_user_header_t,
- datap: &cap_user_data_t,
+ hdrp: *cap_user_header_t,
+ datap: *cap_user_data_t,
};
pub const cap_user_header_t = extern struct {
@@ -1319,11 +1319,11 @@ pub fn unshare(flags: usize) usize {
return syscall1(SYS_unshare, usize(flags));
}
-pub fn capget(hdrp: &cap_user_header_t, datap: &cap_user_data_t) usize {
+pub fn capget(hdrp: *cap_user_header_t, datap: *cap_user_data_t) usize {
return syscall2(SYS_capget, @ptrToInt(hdrp), @ptrToInt(datap));
}
-pub fn capset(hdrp: &cap_user_header_t, datap: &const cap_user_data_t) usize {
+pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
}
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
index 8e0a285841..1317da6388 100644
--- a/std/os/linux/vdso.zig
+++ b/std/os/linux/vdso.zig
@@ -8,11 +8,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const vdso_addr = std.os.linux_aux_raw[std.elf.AT_SYSINFO_EHDR];
if (vdso_addr == 0) return 0;
- const eh = @intToPtr(&elf.Ehdr, vdso_addr);
+ const eh = @intToPtr(*elf.Ehdr, vdso_addr);
var ph_addr: usize = vdso_addr + eh.e_phoff;
- const ph = @intToPtr(&elf.Phdr, ph_addr);
+ const ph = @intToPtr(*elf.Phdr, ph_addr);
- var maybe_dynv: ?&usize = null;
+ var maybe_dynv: ?*usize = null;
var base: usize = @maxValue(usize);
{
var i: usize = 0;
@@ -20,10 +20,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
i += 1;
ph_addr += eh.e_phentsize;
}) {
- const this_ph = @intToPtr(&elf.Phdr, ph_addr);
+ const this_ph = @intToPtr(*elf.Phdr, ph_addr);
switch (this_ph.p_type) {
elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr,
- elf.PT_DYNAMIC => maybe_dynv = @intToPtr(&usize, vdso_addr + this_ph.p_offset),
+ elf.PT_DYNAMIC => maybe_dynv = @intToPtr(*usize, vdso_addr + this_ph.p_offset),
else => {},
}
}
@@ -31,22 +31,22 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const dynv = maybe_dynv ?? return 0;
if (base == @maxValue(usize)) return 0;
- var maybe_strings: ?&u8 = null;
- var maybe_syms: ?&elf.Sym = null;
- var maybe_hashtab: ?&linux.Elf_Symndx = null;
- var maybe_versym: ?&u16 = null;
- var maybe_verdef: ?&elf.Verdef = null;
+ var maybe_strings: ?*u8 = null;
+ var maybe_syms: ?*elf.Sym = null;
+ var maybe_hashtab: ?*linux.Elf_Symndx = null;
+ var maybe_versym: ?*u16 = null;
+ var maybe_verdef: ?*elf.Verdef = null;
{
var i: usize = 0;
while (dynv[i] != 0) : (i += 2) {
const p = base + dynv[i + 1];
switch (dynv[i]) {
- elf.DT_STRTAB => maybe_strings = @intToPtr(&u8, p),
- elf.DT_SYMTAB => maybe_syms = @intToPtr(&elf.Sym, p),
- elf.DT_HASH => maybe_hashtab = @intToPtr(&linux.Elf_Symndx, p),
- elf.DT_VERSYM => maybe_versym = @intToPtr(&u16, p),
- elf.DT_VERDEF => maybe_verdef = @intToPtr(&elf.Verdef, p),
+ elf.DT_STRTAB => maybe_strings = @intToPtr(*u8, p),
+ elf.DT_SYMTAB => maybe_syms = @intToPtr(*elf.Sym, p),
+ elf.DT_HASH => maybe_hashtab = @intToPtr(*linux.Elf_Symndx, p),
+ elf.DT_VERSYM => maybe_versym = @intToPtr(*u16, p),
+ elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p),
else => {},
}
}
@@ -76,7 +76,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
return 0;
}
-fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &u8) bool {
+fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: *u8) bool {
var def = def_arg;
const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
while (true) {
@@ -84,8 +84,8 @@ fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &
break;
if (def.vd_next == 0)
return false;
- def = @intToPtr(&elf.Verdef, @ptrToInt(def) + def.vd_next);
+ def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next);
}
- const aux = @intToPtr(&elf.Verdaux, @ptrToInt(def) + def.vd_aux);
+ const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux);
return mem.eql(u8, vername, cstr.toSliceConst(&strings[aux.vda_name]));
}
diff --git a/std/os/linux/x86_64.zig b/std/os/linux/x86_64.zig
index b43a642038..9a90e64757 100644
--- a/std/os/linux/x86_64.zig
+++ b/std/os/linux/x86_64.zig
@@ -463,7 +463,7 @@ pub fn syscall6(
}
/// This matches the libc clone function.
-pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: &i32, tls: usize, ctid: &i32) usize;
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
pub nakedcc fn restore_rt() void {
return asm volatile ("syscall"
@@ -474,12 +474,12 @@ pub nakedcc fn restore_rt() void {
}
pub const msghdr = extern struct {
- msg_name: &u8,
+ msg_name: *u8,
msg_namelen: socklen_t,
- msg_iov: &iovec,
+ msg_iov: *iovec,
msg_iovlen: i32,
__pad1: i32,
- msg_control: &u8,
+ msg_control: *u8,
msg_controllen: socklen_t,
__pad2: socklen_t,
msg_flags: i32,
diff --git a/std/os/path.zig b/std/os/path.zig
index 162faffc42..4df6179bf5 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool {
/// Naively combines a series of paths with the native path seperator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn join(allocator: *Allocator, paths: ...) ![]u8 {
if (is_windows) {
return joinWindows(allocator, paths);
} else {
@@ -40,11 +40,11 @@ pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
}
}
-pub fn joinWindows(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn joinWindows(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_windows, paths);
}
-pub fn joinPosix(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn joinPosix(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_posix, paths);
}
@@ -310,7 +310,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// Converts the command line arguments into a slice and calls `resolveSlice`.
-pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
+pub fn resolve(allocator: *Allocator, args: ...) ![]u8 {
var paths: [args.len][]const u8 = undefined;
comptime var arg_i = 0;
inline while (arg_i < args.len) : (arg_i += 1) {
@@ -320,7 +320,7 @@ pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (is_windows) {
return resolveWindows(allocator, paths);
} else {
@@ -334,7 +334,7 @@ pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Each drive has its own current working directory.
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
-pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
return os.getCwd(allocator);
@@ -513,7 +513,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// It resolves "." and "..".
/// The result does not have a trailing path separator.
/// If all paths are relative it uses the current working directory as a starting point.
-pub fn resolvePosix(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
return os.getCwd(allocator);
@@ -883,7 +883,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
if (is_windows) {
return relativeWindows(allocator, from, to);
} else {
@@ -891,7 +891,7 @@ pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
-pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -964,7 +964,7 @@ pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8)
return []u8{};
}
-pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -1063,7 +1063,7 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
/// Expands all symbolic links and resolves references to `.`, `..`, and
/// extra `/` characters in ::pathname.
/// Caller must deallocate result.
-pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 {
+pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 {
switch (builtin.os) {
Os.windows => {
const pathname_buf = try allocator.alloc(u8, pathname.len + 1);
diff --git a/std/os/test.zig b/std/os/test.zig
index 4dfe76224a..4aa3535829 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -63,7 +63,7 @@ fn start1(ctx: void) u8 {
return 0;
}
-fn start2(ctx: &i32) u8 {
+fn start2(ctx: *i32) u8 {
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
return 0;
}
diff --git a/std/os/time.zig b/std/os/time.zig
index 9a7c682483..8629504323 100644
--- a/std/os/time.zig
+++ b/std/os/time.zig
@@ -200,7 +200,7 @@ pub const Timer = struct {
}
/// Reads the timer value since start or the last reset in nanoseconds
- pub fn read(self: &Timer) u64 {
+ pub fn read(self: *Timer) u64 {
var clock = clockNative() - self.start_time;
return switch (builtin.os) {
Os.windows => @divFloor(clock * ns_per_s, self.frequency),
@@ -211,12 +211,12 @@ pub const Timer = struct {
}
/// Resets the timer value to 0/now.
- pub fn reset(self: &Timer) void {
+ pub fn reset(self: *Timer) void {
self.start_time = clockNative();
}
/// Returns the current value of the timer in nanoseconds, then resets it
- pub fn lap(self: &Timer) u64 {
+ pub fn lap(self: *Timer) u64 {
var now = clockNative();
var lap_time = self.read();
self.start_time = now;
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index 264ea391c4..85f69836d5 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -1,7 +1,7 @@
pub const ERROR = @import("error.zig");
pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
- phProv: &HCRYPTPROV,
+ phProv: *HCRYPTPROV,
pszContainer: ?LPCSTR,
pszProvider: ?LPCSTR,
dwProvType: DWORD,
@@ -10,13 +10,13 @@ pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL;
-pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: &BYTE) BOOL;
+pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: *BYTE) BOOL;
pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn CreateDirectoryA(
lpPathName: LPCSTR,
- lpSecurityAttributes: ?&SECURITY_ATTRIBUTES,
+ lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateFileA(
@@ -30,23 +30,23 @@ pub extern "kernel32" stdcallcc fn CreateFileA(
) HANDLE;
pub extern "kernel32" stdcallcc fn CreatePipe(
- hReadPipe: &HANDLE,
- hWritePipe: &HANDLE,
- lpPipeAttributes: &const SECURITY_ATTRIBUTES,
+ hReadPipe: *HANDLE,
+ hWritePipe: *HANDLE,
+ lpPipeAttributes: *const SECURITY_ATTRIBUTES,
nSize: DWORD,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateProcessA(
lpApplicationName: ?LPCSTR,
lpCommandLine: LPSTR,
- lpProcessAttributes: ?&SECURITY_ATTRIBUTES,
- lpThreadAttributes: ?&SECURITY_ATTRIBUTES,
+ lpProcessAttributes: ?*SECURITY_ATTRIBUTES,
+ lpThreadAttributes: ?*SECURITY_ATTRIBUTES,
bInheritHandles: BOOL,
dwCreationFlags: DWORD,
- lpEnvironment: ?&c_void,
+ lpEnvironment: ?*c_void,
lpCurrentDirectory: ?LPCSTR,
- lpStartupInfo: &STARTUPINFOA,
- lpProcessInformation: &PROCESS_INFORMATION,
+ lpStartupInfo: *STARTUPINFOA,
+ lpProcessInformation: *PROCESS_INFORMATION,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(
@@ -65,7 +65,7 @@ pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: LPCH) BOOL;
pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
-pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: &DWORD) BOOL;
+pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD;
@@ -73,9 +73,9 @@ pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?LPCH;
pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD;
-pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: &DWORD) BOOL;
+pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL;
-pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: &LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
@@ -84,7 +84,7 @@ pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx(
in_hFile: HANDLE,
in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
- out_lpFileInformation: &c_void,
+ out_lpFileInformation: *c_void,
in_dwBufferSize: DWORD,
) BOOL;
@@ -97,21 +97,21 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
-pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?&FILETIME) void;
+pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
-pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void, dwBytes: SIZE_T) ?&c_void;
-pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
+pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
+pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
-pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?&c_void;
+pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void;
-pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR,
@@ -119,24 +119,24 @@ pub extern "kernel32" stdcallcc fn MoveFileExA(
dwFlags: DWORD,
) BOOL;
-pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: &LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL;
-pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: &LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
- out_lpBuffer: &c_void,
+ out_lpBuffer: *c_void,
in_nNumberOfBytesToRead: DWORD,
- out_lpNumberOfBytesRead: &DWORD,
- in_out_lpOverlapped: ?&OVERLAPPED,
+ out_lpNumberOfBytesRead: *DWORD,
+ in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
pub extern "kernel32" stdcallcc fn SetFilePointerEx(
in_fFile: HANDLE,
in_liDistanceToMove: LARGE_INTEGER,
- out_opt_ldNewFilePointer: ?&LARGE_INTEGER,
+ out_opt_ldNewFilePointer: ?*LARGE_INTEGER,
in_dwMoveMethod: DWORD,
) BOOL;
@@ -150,10 +150,10 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE,
- in_lpBuffer: &const c_void,
+ in_lpBuffer: *const c_void,
in_nNumberOfBytesToWrite: DWORD,
- out_lpNumberOfBytesWritten: ?&DWORD,
- in_out_lpOverlapped: ?&OVERLAPPED,
+ out_lpNumberOfBytesWritten: ?*DWORD,
+ in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
//TODO: call unicode versions instead of relying on ANSI code page
@@ -171,23 +171,23 @@ pub const BYTE = u8;
pub const CHAR = u8;
pub const DWORD = u32;
pub const FLOAT = f32;
-pub const HANDLE = &c_void;
+pub const HANDLE = *c_void;
pub const HCRYPTPROV = ULONG_PTR;
-pub const HINSTANCE = &@OpaqueType();
-pub const HMODULE = &@OpaqueType();
+pub const HINSTANCE = *@OpaqueType();
+pub const HMODULE = *@OpaqueType();
pub const INT = c_int;
-pub const LPBYTE = &BYTE;
-pub const LPCH = &CHAR;
-pub const LPCSTR = &const CHAR;
-pub const LPCTSTR = &const TCHAR;
-pub const LPCVOID = &const c_void;
-pub const LPDWORD = &DWORD;
-pub const LPSTR = &CHAR;
+pub const LPBYTE = *BYTE;
+pub const LPCH = *CHAR;
+pub const LPCSTR = *const CHAR;
+pub const LPCTSTR = *const TCHAR;
+pub const LPCVOID = *const c_void;
+pub const LPDWORD = *DWORD;
+pub const LPSTR = *CHAR;
pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR;
-pub const LPVOID = &c_void;
-pub const LPWSTR = &WCHAR;
-pub const PVOID = &c_void;
-pub const PWSTR = &WCHAR;
+pub const LPVOID = *c_void;
+pub const LPWSTR = *WCHAR;
+pub const PVOID = *c_void;
+pub const PWSTR = *WCHAR;
pub const SIZE_T = usize;
pub const TCHAR = if (UNICODE) WCHAR else u8;
pub const UINT = c_uint;
@@ -218,7 +218,7 @@ pub const OVERLAPPED = extern struct {
Pointer: PVOID,
hEvent: HANDLE,
};
-pub const LPOVERLAPPED = &OVERLAPPED;
+pub const LPOVERLAPPED = *OVERLAPPED;
pub const MAX_PATH = 260;
@@ -271,11 +271,11 @@ pub const VOLUME_NAME_NT = 0x2;
pub const SECURITY_ATTRIBUTES = extern struct {
nLength: DWORD,
- lpSecurityDescriptor: ?&c_void,
+ lpSecurityDescriptor: ?*c_void,
bInheritHandle: BOOL,
};
-pub const PSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
-pub const LPSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
+pub const PSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
+pub const LPSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
pub const GENERIC_READ = 0x80000000;
pub const GENERIC_WRITE = 0x40000000;
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 2bd8a157e4..7170346108 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -42,7 +42,7 @@ pub const WriteError = error{
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
- if (windows.WriteFile(handle, @ptrCast(&const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
+ if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
@@ -68,11 +68,11 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
const size = @sizeOf(windows.FILE_NAME_INFO);
var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = []u8{0} ** (size + windows.MAX_PATH);
- if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(&c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) {
+ if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(*c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) {
return true;
}
- const name_info = @ptrCast(&const windows.FILE_NAME_INFO, &name_info_bytes[0]);
+ const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
const name_wide = ([]u16)(name_bytes);
return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
@@ -91,7 +91,7 @@ pub const OpenError = error{
/// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator.
pub fn windowsOpen(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
file_path: []const u8,
desired_access: windows.DWORD,
share_mode: windows.DWORD,
@@ -119,7 +119,7 @@ pub fn windowsOpen(
}
/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap) ![]u8 {
+pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u8 {
// count bytes needed
const bytes_needed = x: {
var bytes_needed: usize = 1; // 1 for the final null byte
@@ -150,7 +150,7 @@ pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap)
return result;
}
-pub fn windowsLoadDll(allocator: &mem.Allocator, dll_path: []const u8) !windows.HMODULE {
+pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;
diff --git a/std/os/zen.zig b/std/os/zen.zig
index 2411c5363e..2312b36dea 100644
--- a/std/os/zen.zig
+++ b/std/os/zen.zig
@@ -8,7 +8,7 @@ pub const Message = struct {
type: usize,
payload: usize,
- pub fn from(mailbox_id: &const MailboxId) Message {
+ pub fn from(mailbox_id: *const MailboxId) Message {
return Message{
.sender = MailboxId.Undefined,
.receiver = *mailbox_id,
@@ -17,7 +17,7 @@ pub const Message = struct {
};
}
- pub fn to(mailbox_id: &const MailboxId, msg_type: usize) Message {
+ pub fn to(mailbox_id: *const MailboxId, msg_type: usize) Message {
return Message{
.sender = MailboxId.This,
.receiver = *mailbox_id,
@@ -26,7 +26,7 @@ pub const Message = struct {
};
}
- pub fn withData(mailbox_id: &const MailboxId, msg_type: usize, payload: usize) Message {
+ pub fn withData(mailbox_id: *const MailboxId, msg_type: usize, payload: usize) Message {
return Message{
.sender = MailboxId.This,
.receiver = *mailbox_id,
@@ -67,7 +67,7 @@ pub const getErrno = @import("linux/index.zig").getErrno;
use @import("linux/errno.zig");
// TODO: implement this correctly.
-pub fn read(fd: i32, buf: &u8, count: usize) usize {
+pub fn read(fd: i32, buf: *u8, count: usize) usize {
switch (fd) {
STDIN_FILENO => {
var i: usize = 0;
@@ -75,7 +75,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
send(Message.to(Server.Keyboard, 0));
var message = Message.from(MailboxId.This);
- receive(&message);
+                receive(&message);
buf[i] = u8(message.payload);
}
@@ -86,7 +86,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
}
// TODO: implement this correctly.
-pub fn write(fd: i32, buf: &const u8, count: usize) usize {
+pub fn write(fd: i32, buf: *const u8, count: usize) usize {
switch (fd) {
STDOUT_FILENO, STDERR_FILENO => {
var i: usize = 0;
@@ -126,22 +126,22 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn createPort(mailbox_id: &const MailboxId) void {
+pub fn createPort(mailbox_id: *const MailboxId) void {
_ = switch (*mailbox_id) {
MailboxId.Port => |id| syscall1(Syscall.createPort, id),
else => unreachable,
};
}
-pub fn send(message: &const Message) void {
+pub fn send(message: *const Message) void {
_ = syscall1(Syscall.send, @ptrToInt(message));
}
-pub fn receive(destination: &Message) void {
+pub fn receive(destination: *Message) void {
_ = syscall1(Syscall.receive, @ptrToInt(destination));
}
-pub fn subscribeIRQ(irq: u8, mailbox_id: &const MailboxId) void {
+pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void {
_ = syscall2(Syscall.subscribeIRQ, irq, @ptrToInt(mailbox_id));
}
diff --git a/std/rand/index.zig b/std/rand/index.zig
index c32309a0fd..3a1a559cd9 100644
--- a/std/rand/index.zig
+++ b/std/rand/index.zig
@@ -28,15 +28,15 @@ pub const DefaultPrng = Xoroshiro128;
pub const DefaultCsprng = Isaac64;
pub const Random = struct {
- fillFn: fn (r: &Random, buf: []u8) void,
+ fillFn: fn (r: *Random, buf: []u8) void,
/// Read random bytes into the specified buffer until fill.
- pub fn bytes(r: &Random, buf: []u8) void {
+ pub fn bytes(r: *Random, buf: []u8) void {
r.fillFn(r, buf);
}
/// Return a random integer/boolean type.
- pub fn scalar(r: &Random, comptime T: type) T {
+ pub fn scalar(r: *Random, comptime T: type) T {
var rand_bytes: [@sizeOf(T)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@@ -50,7 +50,7 @@ pub const Random = struct {
/// Get a random unsigned integer with even distribution between `start`
/// inclusive and `end` exclusive.
- pub fn range(r: &Random, comptime T: type, start: T, end: T) T {
+ pub fn range(r: *Random, comptime T: type, start: T, end: T) T {
assert(start <= end);
if (T.is_signed) {
const uint = @IntType(false, T.bit_count);
@@ -92,7 +92,7 @@ pub const Random = struct {
}
/// Return a floating point value evenly distributed in the range [0, 1).
- pub fn float(r: &Random, comptime T: type) T {
+ pub fn float(r: *Random, comptime T: type) T {
// Generate a uniform value between [1, 2) and scale down to [0, 1).
// Note: The lowest mantissa bit is always set to 0 so we only use half the available range.
switch (T) {
@@ -113,7 +113,7 @@ pub const Random = struct {
/// Return a floating point value normally distributed with mean = 0, stddev = 1.
///
/// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
- pub fn floatNorm(r: &Random, comptime T: type) T {
+ pub fn floatNorm(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
f32 => return f32(value),
@@ -125,7 +125,7 @@ pub const Random = struct {
/// Return an exponentially distributed float with a rate parameter of 1.
///
/// To use a different rate parameter, use: floatExp(...) / desiredRate.
- pub fn floatExp(r: &Random, comptime T: type) T {
+ pub fn floatExp(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
f32 => return f32(value),
@@ -135,7 +135,7 @@ pub const Random = struct {
}
/// Shuffle a slice into a random order.
- pub fn shuffle(r: &Random, comptime T: type, buf: []T) void {
+ pub fn shuffle(r: *Random, comptime T: type, buf: []T) void {
if (buf.len < 2) {
return;
}
@@ -159,7 +159,7 @@ const SplitMix64 = struct {
return SplitMix64{ .s = seed };
}
- pub fn next(self: &SplitMix64) u64 {
+ pub fn next(self: *SplitMix64) u64 {
self.s +%= 0x9e3779b97f4a7c15;
var z = self.s;
@@ -208,7 +208,7 @@ pub const Pcg = struct {
return pcg;
}
- fn next(self: &Pcg) u32 {
+ fn next(self: *Pcg) u32 {
const l = self.s;
self.s = l *% default_multiplier +% (self.i | 1);
@@ -218,13 +218,13 @@ pub const Pcg = struct {
return (xor_s >> u5(rot)) | (xor_s << u5((0 -% rot) & 31));
}
- fn seed(self: &Pcg, init_s: u64) void {
+ fn seed(self: *Pcg, init_s: u64) void {
// Pcg requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
self.seedTwo(gen.next(), gen.next());
}
- fn seedTwo(self: &Pcg, init_s: u64, init_i: u64) void {
+ fn seedTwo(self: *Pcg, init_s: u64, init_i: u64) void {
self.s = 0;
self.i = (init_s << 1) | 1;
self.s = self.s *% default_multiplier +% self.i;
@@ -232,7 +232,7 @@ pub const Pcg = struct {
self.s = self.s *% default_multiplier +% self.i;
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Pcg, "random", r);
var i: usize = 0;
@@ -297,7 +297,7 @@ pub const Xoroshiro128 = struct {
return x;
}
- fn next(self: &Xoroshiro128) u64 {
+ fn next(self: *Xoroshiro128) u64 {
const s0 = self.s[0];
var s1 = self.s[1];
const r = s0 +% s1;
@@ -310,7 +310,7 @@ pub const Xoroshiro128 = struct {
}
// Skip 2^64 places ahead in the sequence
- fn jump(self: &Xoroshiro128) void {
+ fn jump(self: *Xoroshiro128) void {
var s0: u64 = 0;
var s1: u64 = 0;
@@ -334,7 +334,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = s1;
}
- fn seed(self: &Xoroshiro128, init_s: u64) void {
+ fn seed(self: *Xoroshiro128, init_s: u64) void {
// Xoroshiro requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
@@ -342,7 +342,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = gen.next();
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Xoroshiro128, "random", r);
var i: usize = 0;
@@ -435,7 +435,7 @@ pub const Isaac64 = struct {
return isaac;
}
- fn step(self: &Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
+ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
const x = self.m[base + m1];
self.a = mix +% self.m[base + m2];
@@ -446,7 +446,7 @@ pub const Isaac64 = struct {
self.r[self.r.len - 1 - base - m1] = self.b;
}
- fn refill(self: &Isaac64) void {
+ fn refill(self: *Isaac64) void {
const midpoint = self.r.len / 2;
self.c +%= 1;
@@ -475,7 +475,7 @@ pub const Isaac64 = struct {
self.i = 0;
}
- fn next(self: &Isaac64) u64 {
+ fn next(self: *Isaac64) u64 {
if (self.i >= self.r.len) {
self.refill();
}
@@ -485,7 +485,7 @@ pub const Isaac64 = struct {
return value;
}
- fn seed(self: &Isaac64, init_s: u64, comptime rounds: usize) void {
+ fn seed(self: *Isaac64, init_s: u64, comptime rounds: usize) void {
// We ignore the multi-pass requirement since we don't currently expose full access to
// seeding the self.m array completely.
mem.set(u64, self.m[0..], 0);
@@ -551,7 +551,7 @@ pub const Isaac64 = struct {
self.i = self.r.len; // trigger refill on first value
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Isaac64, "random", r);
var i: usize = 0;
@@ -666,7 +666,7 @@ test "Random range" {
testRange(&prng.random, 10, 14);
}
-fn testRange(r: &Random, start: i32, end: i32) void {
+fn testRange(r: *Random, start: i32, end: i32) void {
const count = usize(end - start);
var values_buffer = []bool{false} ** 20;
const values = values_buffer[0..count];
diff --git a/std/rand/ziggurat.zig b/std/rand/ziggurat.zig
index 7daeb59165..774d3bd52a 100644
--- a/std/rand/ziggurat.zig
+++ b/std/rand/ziggurat.zig
@@ -12,7 +12,7 @@ const std = @import("../index.zig");
const math = std.math;
const Random = std.rand.Random;
-pub fn next_f64(random: &Random, comptime tables: &const ZigTable) f64 {
+pub fn next_f64(random: *Random, comptime tables: *const ZigTable) f64 {
while (true) {
// We manually construct a float from parts as we can avoid an extra random lookup here by
// using the unused exponent for the lookup table entry.
@@ -60,7 +60,7 @@ pub const ZigTable = struct {
// whether the distribution is symmetric
is_symmetric: bool,
// fallback calculation in the case we are in the 0 block
- zero_case: fn (&Random, f64) f64,
+ zero_case: fn (*Random, f64) f64,
};
// zigNorInit
@@ -70,7 +70,7 @@ fn ZigTableGen(
comptime v: f64,
comptime f: fn (f64) f64,
comptime f_inv: fn (f64) f64,
- comptime zero_case: fn (&Random, f64) f64,
+ comptime zero_case: fn (*Random, f64) f64,
) ZigTable {
var tables: ZigTable = undefined;
@@ -110,7 +110,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
-fn norm_zero_case(random: &Random, u: f64) f64 {
+fn norm_zero_case(random: *Random, u: f64) f64 {
var x: f64 = 1;
var y: f64 = 0;
@@ -149,7 +149,7 @@ fn exp_f(x: f64) f64 {
fn exp_f_inv(y: f64) f64 {
return -math.ln(y);
}
-fn exp_zero_case(random: &Random, _: f64) f64 {
+fn exp_zero_case(random: *Random, _: f64) f64 {
return exp_r - math.ln(random.float(f64));
}
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
index d755135fe8..be9a2071a0 100644
--- a/std/segmented_list.zig
+++ b/std/segmented_list.zig
@@ -87,49 +87,49 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
const ShelfIndex = std.math.Log2Int(usize);
prealloc_segment: [prealloc_item_count]T,
- dynamic_segments: []&T,
- allocator: &Allocator,
+ dynamic_segments: []*T,
+ allocator: *Allocator,
len: usize,
pub const prealloc_count = prealloc_item_count;
/// Deinitialize with `deinit`
- pub fn init(allocator: &Allocator) Self {
+ pub fn init(allocator: *Allocator) Self {
return Self{
.allocator = allocator,
.len = 0,
.prealloc_segment = undefined,
- .dynamic_segments = []&T{},
+ .dynamic_segments = []*T{},
};
}
- pub fn deinit(self: &Self) void {
+ pub fn deinit(self: *Self) void {
self.freeShelves(ShelfIndex(self.dynamic_segments.len), 0);
self.allocator.free(self.dynamic_segments);
self.* = undefined;
}
- pub fn at(self: &Self, i: usize) &T {
+ pub fn at(self: *Self, i: usize) *T {
assert(i < self.len);
return self.uncheckedAt(i);
}
- pub fn count(self: &const Self) usize {
+ pub fn count(self: *const Self) usize {
return self.len;
}
- pub fn push(self: &Self, item: &const T) !void {
+ pub fn push(self: *Self, item: *const T) !void {
const new_item_ptr = try self.addOne();
new_item_ptr.* = item.*;
}
- pub fn pushMany(self: &Self, items: []const T) !void {
+ pub fn pushMany(self: *Self, items: []const T) !void {
for (items) |item| {
try self.push(item);
}
}
- pub fn pop(self: &Self) ?T {
+ pub fn pop(self: *Self) ?T {
if (self.len == 0) return null;
const index = self.len - 1;
@@ -138,7 +138,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return result;
}
- pub fn addOne(self: &Self) !&T {
+ pub fn addOne(self: *Self) !*T {
const new_length = self.len + 1;
try self.growCapacity(new_length);
const result = self.uncheckedAt(self.len);
@@ -147,7 +147,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Grows or shrinks capacity to match usage.
- pub fn setCapacity(self: &Self, new_capacity: usize) !void {
+ pub fn setCapacity(self: *Self, new_capacity: usize) !void {
if (new_capacity <= usize(1) << (prealloc_exp + self.dynamic_segments.len)) {
return self.shrinkCapacity(new_capacity);
} else {
@@ -156,15 +156,15 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Only grows capacity, or retains current capacity
- pub fn growCapacity(self: &Self, new_capacity: usize) !void {
+ pub fn growCapacity(self: *Self, new_capacity: usize) !void {
const new_cap_shelf_count = shelfCount(new_capacity);
const old_shelf_count = ShelfIndex(self.dynamic_segments.len);
if (new_cap_shelf_count > old_shelf_count) {
- self.dynamic_segments = try self.allocator.realloc(&T, self.dynamic_segments, new_cap_shelf_count);
+ self.dynamic_segments = try self.allocator.realloc(*T, self.dynamic_segments, new_cap_shelf_count);
var i = old_shelf_count;
errdefer {
self.freeShelves(i, old_shelf_count);
- self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, old_shelf_count);
+ self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, old_shelf_count);
}
while (i < new_cap_shelf_count) : (i += 1) {
self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr;
@@ -173,12 +173,12 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Only shrinks capacity or retains current capacity
- pub fn shrinkCapacity(self: &Self, new_capacity: usize) void {
+ pub fn shrinkCapacity(self: *Self, new_capacity: usize) void {
if (new_capacity <= prealloc_item_count) {
const len = ShelfIndex(self.dynamic_segments.len);
self.freeShelves(len, 0);
self.allocator.free(self.dynamic_segments);
- self.dynamic_segments = []&T{};
+ self.dynamic_segments = []*T{};
return;
}
@@ -190,10 +190,10 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
self.freeShelves(old_shelf_count, new_cap_shelf_count);
- self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, new_cap_shelf_count);
+ self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, new_cap_shelf_count);
}
- pub fn uncheckedAt(self: &Self, index: usize) &T {
+ pub fn uncheckedAt(self: *Self, index: usize) *T {
if (index < prealloc_item_count) {
return &self.prealloc_segment[index];
}
@@ -230,7 +230,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return list_index + prealloc_item_count - (usize(1) << ((prealloc_exp + 1) + shelf_index));
}
- fn freeShelves(self: &Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
+ fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
var i = from_count;
while (i != to_count) {
i -= 1;
@@ -239,13 +239,13 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
pub const Iterator = struct {
- list: &Self,
+ list: *Self,
index: usize,
box_index: usize,
shelf_index: ShelfIndex,
shelf_size: usize,
- pub fn next(it: &Iterator) ?&T {
+ pub fn next(it: *Iterator) ?*T {
if (it.index >= it.list.len) return null;
if (it.index < prealloc_item_count) {
const ptr = &it.list.prealloc_segment[it.index];
@@ -269,7 +269,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return ptr;
}
- pub fn prev(it: &Iterator) ?&T {
+ pub fn prev(it: *Iterator) ?*T {
if (it.index == 0) return null;
it.index -= 1;
@@ -286,7 +286,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
}
- pub fn peek(it: &Iterator) ?&T {
+ pub fn peek(it: *Iterator) ?*T {
if (it.index >= it.list.len)
return null;
if (it.index < prealloc_item_count)
@@ -295,7 +295,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
}
- pub fn set(it: &Iterator, index: usize) void {
+ pub fn set(it: *Iterator, index: usize) void {
it.index = index;
if (index < prealloc_item_count) return;
it.shelf_index = shelfIndex(index);
@@ -304,7 +304,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
};
- pub fn iterator(self: &Self, start_index: usize) Iterator {
+ pub fn iterator(self: *Self, start_index: usize) Iterator {
var it = Iterator{
.list = self,
.index = undefined,
@@ -331,7 +331,7 @@ test "std.SegmentedList" {
try testSegmentedList(16, a);
}
-fn testSegmentedList(comptime prealloc: usize, allocator: &Allocator) !void {
+fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
var list = SegmentedList(i32, prealloc).init(allocator);
defer list.deinit();
diff --git a/std/sort.zig b/std/sort.zig
index 4e17718241..1b44c18dd9 100644
--- a/std/sort.zig
+++ b/std/sort.zig
@@ -5,7 +5,7 @@ const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
-pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
+pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
{
var i: usize = 1;
while (i < items.len) : (i += 1) {
@@ -30,7 +30,7 @@ const Range = struct {
};
}
- fn length(self: &const Range) usize {
+ fn length(self: *const Range) usize {
return self.end - self.start;
}
};
@@ -58,12 +58,12 @@ const Iterator = struct {
};
}
- fn begin(self: &Iterator) void {
+ fn begin(self: *Iterator) void {
self.numerator = 0;
self.decimal = 0;
}
- fn nextRange(self: &Iterator) Range {
+ fn nextRange(self: *Iterator) Range {
const start = self.decimal;
self.decimal += self.decimal_step;
@@ -79,11 +79,11 @@ const Iterator = struct {
};
}
- fn finished(self: &Iterator) bool {
+ fn finished(self: *Iterator) bool {
return self.decimal >= self.size;
}
- fn nextLevel(self: &Iterator) bool {
+ fn nextLevel(self: *Iterator) bool {
self.decimal_step += self.decimal_step;
self.numerator_step += self.numerator_step;
if (self.numerator_step >= self.denominator) {
@@ -94,7 +94,7 @@ const Iterator = struct {
return (self.decimal_step < self.size);
}
- fn length(self: &Iterator) usize {
+ fn length(self: *Iterator) usize {
return self.decimal_step;
}
};
@@ -108,7 +108,7 @@ const Pull = struct {
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
-pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
+pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
@@ -741,7 +741,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &con
}
// merge operation without a buffer
-fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn (&const T, &const T) bool) void {
+fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const Range, lessThan: fn (*const T, *const T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
@@ -783,7 +783,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const
}
// merge operation using an internal buffer
-fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, buffer: &const Range) void {
+fn mergeInternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, buffer: *const Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
@@ -819,7 +819,7 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where have some idea as to how many unique values there are and where the next value might be
-fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findFirstForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -833,7 +833,7 @@ fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
-fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findFirstBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -847,7 +847,7 @@ fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &cons
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
-fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findLastForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -861,7 +861,7 @@ fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
-fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findLastBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -875,7 +875,7 @@ fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
-fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
+fn binaryFirst(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@@ -893,7 +893,7 @@ fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Rang
return start;
}
-fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
+fn binaryLast(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@@ -911,7 +911,7 @@ fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range
return start;
}
-fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, into: []T) void {
+fn mergeInto(comptime T: type, from: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
@@ -941,7 +941,7 @@ fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, less
}
}
-fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, cache: []T) void {
+fn mergeExternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
@@ -969,26 +969,26 @@ fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range,
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
-fn swap(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool, order: &[8]u8, x: usize, y: usize) void {
+fn swap(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool, order: *[8]u8, x: usize, y: usize) void {
if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
mem.swap(u8, &(order.*)[x], &(order.*)[y]);
}
}
-fn i32asc(lhs: &const i32, rhs: &const i32) bool {
+fn i32asc(lhs: *const i32, rhs: *const i32) bool {
return lhs.* < rhs.*;
}
-fn i32desc(lhs: &const i32, rhs: &const i32) bool {
+fn i32desc(lhs: *const i32, rhs: *const i32) bool {
return rhs.* < lhs.*;
}
-fn u8asc(lhs: &const u8, rhs: &const u8) bool {
+fn u8asc(lhs: *const u8, rhs: *const u8) bool {
return lhs.* < rhs.*;
}
-fn u8desc(lhs: &const u8, rhs: &const u8) bool {
+fn u8desc(lhs: *const u8, rhs: *const u8) bool {
return rhs.* < lhs.*;
}
@@ -1125,7 +1125,7 @@ const IdAndValue = struct {
id: usize,
value: i32,
};
-fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) bool {
+fn cmpByValue(a: *const IdAndValue, b: *const IdAndValue) bool {
return i32asc(a.value, b.value);
}
@@ -1324,7 +1324,7 @@ test "sort fuzz testing" {
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn fuzzTest(rng: &std.rand.Random) void {
+fn fuzzTest(rng: *std.rand.Random) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
@@ -1345,7 +1345,7 @@ fn fuzzTest(rng: &std.rand.Random) void {
}
}
-pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
+pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
var i: usize = 0;
var smallest = items[0];
for (items[1..]) |item| {
@@ -1356,7 +1356,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &cons
return smallest;
}
-pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
+pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
var i: usize = 0;
var biggest = items[0];
for (items[1..]) |item| {
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index c10f4aa806..5ed7874ca5 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -5,7 +5,7 @@ const root = @import("@root");
const std = @import("std");
const builtin = @import("builtin");
-var argc_ptr: &usize = undefined;
+var argc_ptr: *usize = undefined;
comptime {
const strong_linkage = builtin.GlobalLinkage.Strong;
@@ -28,12 +28,12 @@ nakedcc fn _start() noreturn {
switch (builtin.arch) {
builtin.Arch.x86_64 => {
argc_ptr = asm ("lea (%%rsp), %[argc]"
- : [argc] "=r" (-> &usize)
+ : [argc] "=r" (-> *usize)
);
},
builtin.Arch.i386 => {
argc_ptr = asm ("lea (%%esp), %[argc]"
- : [argc] "=r" (-> &usize)
+ : [argc] "=r" (-> *usize)
);
},
else => @compileError("unsupported arch"),
@@ -51,13 +51,13 @@ extern fn WinMainCRTStartup() noreturn {
fn posixCallMainAndExit() noreturn {
const argc = argc_ptr.*;
- const argv = @ptrCast(&&u8, &argc_ptr[1]);
- const envp_nullable = @ptrCast(&?&u8, &argv[argc + 1]);
+ const argv = @ptrCast(**u8, &argc_ptr[1]);
+ const envp_nullable = @ptrCast(*?*u8, &argv[argc + 1]);
var envp_count: usize = 0;
while (envp_nullable[envp_count]) |_| : (envp_count += 1) {}
- const envp = @ptrCast(&&u8, envp_nullable)[0..envp_count];
+ const envp = @ptrCast(**u8, envp_nullable)[0..envp_count];
if (builtin.os == builtin.Os.linux) {
- const auxv = &@ptrCast(&usize, envp.ptr)[envp_count + 1];
+ const auxv = &@ptrCast(*usize, envp.ptr)[envp_count + 1];
var i: usize = 0;
while (auxv[i] != 0) : (i += 2) {
if (auxv[i] < std.os.linux_aux_raw.len) std.os.linux_aux_raw[auxv[i]] = auxv[i + 1];
@@ -68,16 +68,16 @@ fn posixCallMainAndExit() noreturn {
std.os.posix.exit(callMainWithArgs(argc, argv, envp));
}
-fn callMainWithArgs(argc: usize, argv: &&u8, envp: []&u8) u8 {
+fn callMainWithArgs(argc: usize, argv: **u8, envp: []*u8) u8 {
std.os.ArgIteratorPosix.raw = argv[0..argc];
std.os.posix_environ_raw = envp;
return callMain();
}
-extern fn main(c_argc: i32, c_argv: &&u8, c_envp: &?&u8) i32 {
+extern fn main(c_argc: i32, c_argv: **u8, c_envp: *?*u8) i32 {
var env_count: usize = 0;
while (c_envp[env_count] != null) : (env_count += 1) {}
- const envp = @ptrCast(&&u8, c_envp)[0..env_count];
+ const envp = @ptrCast(**u8, c_envp)[0..env_count];
return callMainWithArgs(usize(c_argc), c_argv, envp);
}
diff --git a/std/special/build_file_template.zig b/std/special/build_file_template.zig
index 1c06c93cdc..1e3eb01136 100644
--- a/std/special/build_file_template.zig
+++ b/std/special/build_file_template.zig
@@ -1,10 +1,10 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
const exe = b.addExecutable("YOUR_NAME_HERE", "src/main.zig");
exe.setBuildMode(mode);
- b.default_step.dependOn(&exe.step);
+ b.default_step.dependOn(&exe.step);
b.installArtifact(exe);
}
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 3ff11bbee4..3471d6ed21 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -129,7 +129,7 @@ pub fn main() !void {
};
}
-fn runBuild(builder: &Builder) error!void {
+fn runBuild(builder: *Builder) error!void {
switch (@typeId(@typeOf(root.build).ReturnType)) {
builtin.TypeId.Void => root.build(builder),
builtin.TypeId.ErrorUnion => try root.build(builder),
@@ -137,7 +137,7 @@ fn runBuild(builder: &Builder) error!void {
}
}
-fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
+fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.setInstallPrefix(null);
@@ -195,7 +195,7 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
);
}
-fn usageAndErr(builder: &Builder, already_ran_build: bool, out_stream: var) error {
+fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) error {
usage(builder, already_ran_build, out_stream) catch {};
return error.InvalidArgs;
}
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index 63149d5161..9c9cd35103 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -5,7 +5,7 @@ const builtin = @import("builtin");
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
if (builtin.is_test) {
@setCold(true);
@import("std").debug.panic("{}", msg);
@@ -14,7 +14,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn
}
}
-export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
+export fn memset(dest: ?*u8, c: u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
var index: usize = 0;
@@ -24,7 +24,7 @@ export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
return dest;
}
-export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
+export fn memcpy(noalias dest: ?*u8, noalias src: ?*const u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
var index: usize = 0;
@@ -34,7 +34,7 @@ export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
return dest;
}
-export fn memmove(dest: ?&u8, src: ?&const u8, n: usize) ?&u8 {
+export fn memmove(dest: ?*u8, src: ?*const u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
if (@ptrToInt(dest) < @ptrToInt(src)) {
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index 3e014d4d16..d328324320 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -78,7 +78,7 @@ const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4;
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
if (is_test) {
std.debug.panic("{}", msg);
@@ -284,7 +284,7 @@ nakedcc fn ___chkstk_ms() align(4) void {
);
}
-extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) u32 {
+extern fn __udivmodsi4(a: u32, b: u32, rem: *u32) u32 {
@setRuntimeSafety(is_test);
const d = __udivsi3(a, b);
diff --git a/std/special/compiler_rt/udivmod.zig b/std/special/compiler_rt/udivmod.zig
index 0dee5e45f6..894dd02239 100644
--- a/std/special/compiler_rt/udivmod.zig
+++ b/std/special/compiler_rt/udivmod.zig
@@ -7,15 +7,15 @@ const low = switch (builtin.endian) {
};
const high = 1 - low;
-pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?&DoubleInt) DoubleInt {
+pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
- const n = @ptrCast(&const [2]SingleInt, &a).*; // TODO issue #421
- const d = @ptrCast(&const [2]SingleInt, &b).*; // TODO issue #421
+ const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
+ const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421
var q: [2]SingleInt = undefined;
var r: [2]SingleInt = undefined;
var sr: c_uint = undefined;
@@ -57,7 +57,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[high] = n[high] % d[high];
r[low] = 0;
- rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] / d[high];
}
@@ -69,7 +69,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[low] = n[low];
r[high] = n[high] & (d[high] - 1);
- rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] >> Log2SingleInt(@ctz(d[high]));
}
@@ -109,7 +109,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
sr = @ctz(d[low]);
q[high] = n[high] >> Log2SingleInt(sr);
q[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
- return @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
+ return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
@@ -183,13 +183,13 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// r.all -= b;
// carry = 1;
// }
- r_all = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
const s: SignedDoubleInt = SignedDoubleInt(b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
carry = u32(s & 1);
r_all -= b & @bitCast(DoubleInt, s);
- r = @ptrCast(&[2]SingleInt, &r_all).*; // TODO issue #421
+ r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
}
- const q_all = ((@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
+ const q_all = ((@ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
if (maybe_rem) |rem| {
rem.* = r_all;
}
diff --git a/std/special/compiler_rt/udivmoddi4.zig b/std/special/compiler_rt/udivmoddi4.zig
index 6cc54bb6bf..de86c845e5 100644
--- a/std/special/compiler_rt/udivmoddi4.zig
+++ b/std/special/compiler_rt/udivmoddi4.zig
@@ -1,7 +1,7 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
-pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?&u64) u64 {
+pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) u64 {
@setRuntimeSafety(builtin.is_test);
return udivmod(u64, a, b, maybe_rem);
}
diff --git a/std/special/compiler_rt/udivmodti4.zig b/std/special/compiler_rt/udivmodti4.zig
index 816f82b900..3fa596442f 100644
--- a/std/special/compiler_rt/udivmodti4.zig
+++ b/std/special/compiler_rt/udivmodti4.zig
@@ -2,12 +2,12 @@ const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
const compiler_rt = @import("index.zig");
-pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?&u128) u128 {
+pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) u128 {
@setRuntimeSafety(builtin.is_test);
return udivmod(u128, a, b, maybe_rem);
}
-pub extern fn __udivmodti4_windows_x86_64(a: &const u128, b: &const u128, maybe_rem: ?&u128) void {
+pub extern fn __udivmodti4_windows_x86_64(a: *const u128, b: *const u128, maybe_rem: ?*u128) void {
@setRuntimeSafety(builtin.is_test);
compiler_rt.setXmm0(u128, udivmod(u128, a.*, b.*, maybe_rem));
}
diff --git a/std/special/compiler_rt/udivti3.zig b/std/special/compiler_rt/udivti3.zig
index ad0f09e733..510e21ac1d 100644
--- a/std/special/compiler_rt/udivti3.zig
+++ b/std/special/compiler_rt/udivti3.zig
@@ -6,7 +6,7 @@ pub extern fn __udivti3(a: u128, b: u128) u128 {
return udivmodti4.__udivmodti4(a, b, null);
}
-pub extern fn __udivti3_windows_x86_64(a: &const u128, b: &const u128) void {
+pub extern fn __udivti3_windows_x86_64(a: *const u128, b: *const u128) void {
@setRuntimeSafety(builtin.is_test);
udivmodti4.__udivmodti4_windows_x86_64(a, b, null);
}
diff --git a/std/special/compiler_rt/umodti3.zig b/std/special/compiler_rt/umodti3.zig
index 11e2955bb3..9551e63a6f 100644
--- a/std/special/compiler_rt/umodti3.zig
+++ b/std/special/compiler_rt/umodti3.zig
@@ -9,7 +9,7 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {
return r;
}
-pub extern fn __umodti3_windows_x86_64(a: &const u128, b: &const u128) void {
+pub extern fn __umodti3_windows_x86_64(a: *const u128, b: *const u128) void {
@setRuntimeSafety(builtin.is_test);
compiler_rt.setXmm0(u128, __umodti3(a.*, b.*));
}
diff --git a/std/special/panic.zig b/std/special/panic.zig
index 8f933ddd97..ca1caea73c 100644
--- a/std/special/panic.zig
+++ b/std/special/panic.zig
@@ -6,7 +6,7 @@
const builtin = @import("builtin");
const std = @import("std");
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
switch (builtin.os) {
// TODO: fix panic in zen.
diff --git a/std/unicode.zig b/std/unicode.zig
index 36f04778f4..3d1bebdb55 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -196,7 +196,7 @@ pub const Utf8View = struct {
}
}
- pub fn iterator(s: &const Utf8View) Utf8Iterator {
+ pub fn iterator(s: *const Utf8View) Utf8Iterator {
return Utf8Iterator{
.bytes = s.bytes,
.i = 0,
@@ -208,7 +208,7 @@ const Utf8Iterator = struct {
bytes: []const u8,
i: usize,
- pub fn nextCodepointSlice(it: &Utf8Iterator) ?[]const u8 {
+ pub fn nextCodepointSlice(it: *Utf8Iterator) ?[]const u8 {
if (it.i >= it.bytes.len) {
return null;
}
@@ -219,7 +219,7 @@ const Utf8Iterator = struct {
return it.bytes[it.i - cp_len .. it.i];
}
- pub fn nextCodepoint(it: &Utf8Iterator) ?u32 {
+ pub fn nextCodepoint(it: *Utf8Iterator) ?u32 {
const slice = it.nextCodepointSlice() ?? return null;
switch (slice.len) {
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 56d4f9c393..4d25ceb7db 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -9,26 +9,26 @@ pub const TokenIndex = usize;
pub const Tree = struct {
source: []const u8,
tokens: TokenList,
- root_node: &Node.Root,
+ root_node: *Node.Root,
arena_allocator: std.heap.ArenaAllocator,
errors: ErrorList,
pub const TokenList = SegmentedList(Token, 64);
pub const ErrorList = SegmentedList(Error, 0);
- pub fn deinit(self: &Tree) void {
+ pub fn deinit(self: *Tree) void {
self.arena_allocator.deinit();
}
- pub fn renderError(self: &Tree, parse_error: &Error, stream: var) !void {
+ pub fn renderError(self: *Tree, parse_error: *Error, stream: var) !void {
return parse_error.render(&self.tokens, stream);
}
- pub fn tokenSlice(self: &Tree, token_index: TokenIndex) []const u8 {
+ pub fn tokenSlice(self: *Tree, token_index: TokenIndex) []const u8 {
return self.tokenSlicePtr(self.tokens.at(token_index));
}
- pub fn tokenSlicePtr(self: &Tree, token: &const Token) []const u8 {
+ pub fn tokenSlicePtr(self: *Tree, token: *const Token) []const u8 {
return self.source[token.start..token.end];
}
@@ -39,7 +39,7 @@ pub const Tree = struct {
line_end: usize,
};
- pub fn tokenLocationPtr(self: &Tree, start_index: usize, token: &const Token) Location {
+ pub fn tokenLocationPtr(self: *Tree, start_index: usize, token: *const Token) Location {
var loc = Location{
.line = 0,
.column = 0,
@@ -64,24 +64,24 @@ pub const Tree = struct {
return loc;
}
- pub fn tokenLocation(self: &Tree, start_index: usize, token_index: TokenIndex) Location {
+ pub fn tokenLocation(self: *Tree, start_index: usize, token_index: TokenIndex) Location {
return self.tokenLocationPtr(start_index, self.tokens.at(token_index));
}
- pub fn tokensOnSameLine(self: &Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool {
+ pub fn tokensOnSameLine(self: *Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool {
return self.tokensOnSameLinePtr(self.tokens.at(token1_index), self.tokens.at(token2_index));
}
- pub fn tokensOnSameLinePtr(self: &Tree, token1: &const Token, token2: &const Token) bool {
+ pub fn tokensOnSameLinePtr(self: *Tree, token1: *const Token, token2: *const Token) bool {
return mem.indexOfScalar(u8, self.source[token1.end..token2.start], '\n') == null;
}
- pub fn dump(self: &Tree) void {
+ pub fn dump(self: *Tree) void {
self.root_node.base.dump(0);
}
/// Skips over comments
- pub fn prevToken(self: &Tree, token_index: TokenIndex) TokenIndex {
+ pub fn prevToken(self: *Tree, token_index: TokenIndex) TokenIndex {
var index = token_index - 1;
while (self.tokens.at(index).id == Token.Id.LineComment) {
index -= 1;
@@ -90,7 +90,7 @@ pub const Tree = struct {
}
/// Skips over comments
- pub fn nextToken(self: &Tree, token_index: TokenIndex) TokenIndex {
+ pub fn nextToken(self: *Tree, token_index: TokenIndex) TokenIndex {
var index = token_index + 1;
while (self.tokens.at(index).id == Token.Id.LineComment) {
index += 1;
@@ -120,7 +120,7 @@ pub const Error = union(enum) {
ExpectedToken: ExpectedToken,
ExpectedCommaOrEnd: ExpectedCommaOrEnd,
- pub fn render(self: &const Error, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const Error, tokens: *Tree.TokenList, stream: var) !void {
switch (self.*) {
// TODO https://github.com/ziglang/zig/issues/683
@TagType(Error).InvalidToken => |*x| return x.render(tokens, stream),
@@ -145,7 +145,7 @@ pub const Error = union(enum) {
}
}
- pub fn loc(self: &const Error) TokenIndex {
+ pub fn loc(self: *const Error) TokenIndex {
switch (self.*) {
// TODO https://github.com/ziglang/zig/issues/683
@TagType(Error).InvalidToken => |x| return x.token,
@@ -188,17 +188,17 @@ pub const Error = union(enum) {
pub const ExtraVolatileQualifier = SimpleError("Extra volatile qualifier");
pub const ExpectedCall = struct {
- node: &Node,
+ node: *Node,
- pub fn render(self: &const ExpectedCall, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedCall, tokens: *Tree.TokenList, stream: var) !void {
return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ ", found {}", @tagName(self.node.id));
}
};
pub const ExpectedCallOrFnProto = struct {
- node: &Node,
+ node: *Node,
- pub fn render(self: &const ExpectedCallOrFnProto, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedCallOrFnProto, tokens: *Tree.TokenList, stream: var) !void {
return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ " or " ++ @tagName(Node.Id.FnProto) ++ ", found {}", @tagName(self.node.id));
}
};
@@ -207,7 +207,7 @@ pub const Error = union(enum) {
token: TokenIndex,
expected_id: @TagType(Token.Id),
- pub fn render(self: &const ExpectedToken, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedToken, tokens: *Tree.TokenList, stream: var) !void {
const token_name = @tagName(tokens.at(self.token).id);
return stream.print("expected {}, found {}", @tagName(self.expected_id), token_name);
}
@@ -217,7 +217,7 @@ pub const Error = union(enum) {
token: TokenIndex,
end_id: @TagType(Token.Id),
- pub fn render(self: &const ExpectedCommaOrEnd, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedCommaOrEnd, tokens: *Tree.TokenList, stream: var) !void {
const token_name = @tagName(tokens.at(self.token).id);
return stream.print("expected ',' or {}, found {}", @tagName(self.end_id), token_name);
}
@@ -229,7 +229,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: &const ThisError, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
const token_name = @tagName(tokens.at(self.token).id);
return stream.print(msg, token_name);
}
@@ -242,7 +242,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: &const ThisError, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
return stream.write(msg);
}
};
@@ -320,14 +320,14 @@ pub const Node = struct {
FieldInitializer,
};
- pub fn cast(base: &Node, comptime T: type) ?&T {
+ pub fn cast(base: *Node, comptime T: type) ?*T {
if (base.id == comptime typeToId(T)) {
return @fieldParentPtr(T, "base", base);
}
return null;
}
- pub fn iterate(base: &Node, index: usize) ?&Node {
+ pub fn iterate(base: *Node, index: usize) ?*Node {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -338,7 +338,7 @@ pub const Node = struct {
unreachable;
}
- pub fn firstToken(base: &Node) TokenIndex {
+ pub fn firstToken(base: *Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -349,7 +349,7 @@ pub const Node = struct {
unreachable;
}
- pub fn lastToken(base: &Node) TokenIndex {
+ pub fn lastToken(base: *Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -370,7 +370,7 @@ pub const Node = struct {
unreachable;
}
- pub fn requireSemiColon(base: &const Node) bool {
+ pub fn requireSemiColon(base: *const Node) bool {
var n = base;
while (true) {
switch (n.id) {
@@ -443,7 +443,7 @@ pub const Node = struct {
}
}
- pub fn dump(self: &Node, indent: usize) void {
+ pub fn dump(self: *Node, indent: usize) void {
{
var i: usize = 0;
while (i < indent) : (i += 1) {
@@ -460,44 +460,44 @@ pub const Node = struct {
pub const Root = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
decls: DeclList,
eof_token: TokenIndex,
- pub const DeclList = SegmentedList(&Node, 4);
+ pub const DeclList = SegmentedList(*Node, 4);
- pub fn iterate(self: &Root, index: usize) ?&Node {
+ pub fn iterate(self: *Root, index: usize) ?*Node {
if (index < self.decls.len) {
return self.decls.at(index).*;
}
return null;
}
- pub fn firstToken(self: &Root) TokenIndex {
+ pub fn firstToken(self: *Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken();
}
- pub fn lastToken(self: &Root) TokenIndex {
+ pub fn lastToken(self: *Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken();
}
};
pub const VarDecl = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
name_token: TokenIndex,
eq_token: TokenIndex,
mut_token: TokenIndex,
comptime_token: ?TokenIndex,
extern_export_token: ?TokenIndex,
- lib_name: ?&Node,
- type_node: ?&Node,
- align_node: ?&Node,
- init_node: ?&Node,
+ lib_name: ?*Node,
+ type_node: ?*Node,
+ align_node: ?*Node,
+ init_node: ?*Node,
semicolon_token: TokenIndex,
- pub fn iterate(self: &VarDecl, index: usize) ?&Node {
+ pub fn iterate(self: *VarDecl, index: usize) ?*Node {
var i = index;
if (self.type_node) |type_node| {
@@ -518,7 +518,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &VarDecl) TokenIndex {
+ pub fn firstToken(self: *VarDecl) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.comptime_token) |comptime_token| return comptime_token;
if (self.extern_export_token) |extern_export_token| return extern_export_token;
@@ -526,20 +526,20 @@ pub const Node = struct {
return self.mut_token;
}
- pub fn lastToken(self: &VarDecl) TokenIndex {
+ pub fn lastToken(self: *VarDecl) TokenIndex {
return self.semicolon_token;
}
};
pub const Use = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
use_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
semicolon_token: TokenIndex,
- pub fn iterate(self: &Use, index: usize) ?&Node {
+ pub fn iterate(self: *Use, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -548,12 +548,12 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Use) TokenIndex {
+ pub fn firstToken(self: *Use) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
return self.use_token;
}
- pub fn lastToken(self: &Use) TokenIndex {
+ pub fn lastToken(self: *Use) TokenIndex {
return self.semicolon_token;
}
};
@@ -564,9 +564,9 @@ pub const Node = struct {
decls: DeclList,
rbrace_token: TokenIndex,
- pub const DeclList = SegmentedList(&Node, 2);
+ pub const DeclList = SegmentedList(*Node, 2);
- pub fn iterate(self: &ErrorSetDecl, index: usize) ?&Node {
+ pub fn iterate(self: *ErrorSetDecl, index: usize) ?*Node {
var i = index;
if (i < self.decls.len) return self.decls.at(i).*;
@@ -575,11 +575,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ErrorSetDecl) TokenIndex {
+ pub fn firstToken(self: *ErrorSetDecl) TokenIndex {
return self.error_token;
}
- pub fn lastToken(self: &ErrorSetDecl) TokenIndex {
+ pub fn lastToken(self: *ErrorSetDecl) TokenIndex {
return self.rbrace_token;
}
};
@@ -597,11 +597,11 @@ pub const Node = struct {
const InitArg = union(enum) {
None,
- Enum: ?&Node,
- Type: &Node,
+ Enum: ?*Node,
+ Type: *Node,
};
- pub fn iterate(self: &ContainerDecl, index: usize) ?&Node {
+ pub fn iterate(self: *ContainerDecl, index: usize) ?*Node {
var i = index;
switch (self.init_arg_expr) {
@@ -618,26 +618,26 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ContainerDecl) TokenIndex {
+ pub fn firstToken(self: *ContainerDecl) TokenIndex {
if (self.layout_token) |layout_token| {
return layout_token;
}
return self.kind_token;
}
- pub fn lastToken(self: &ContainerDecl) TokenIndex {
+ pub fn lastToken(self: *ContainerDecl) TokenIndex {
return self.rbrace_token;
}
};
pub const StructField = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
name_token: TokenIndex,
- type_expr: &Node,
+ type_expr: *Node,
- pub fn iterate(self: &StructField, index: usize) ?&Node {
+ pub fn iterate(self: *StructField, index: usize) ?*Node {
var i = index;
if (i < 1) return self.type_expr;
@@ -646,24 +646,24 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &StructField) TokenIndex {
+ pub fn firstToken(self: *StructField) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
return self.name_token;
}
- pub fn lastToken(self: &StructField) TokenIndex {
+ pub fn lastToken(self: *StructField) TokenIndex {
return self.type_expr.lastToken();
}
};
pub const UnionTag = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
name_token: TokenIndex,
- type_expr: ?&Node,
- value_expr: ?&Node,
+ type_expr: ?*Node,
+ value_expr: ?*Node,
- pub fn iterate(self: &UnionTag, index: usize) ?&Node {
+ pub fn iterate(self: *UnionTag, index: usize) ?*Node {
var i = index;
if (self.type_expr) |type_expr| {
@@ -679,11 +679,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &UnionTag) TokenIndex {
+ pub fn firstToken(self: *UnionTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: &UnionTag) TokenIndex {
+ pub fn lastToken(self: *UnionTag) TokenIndex {
if (self.value_expr) |value_expr| {
return value_expr.lastToken();
}
@@ -697,11 +697,11 @@ pub const Node = struct {
pub const EnumTag = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
name_token: TokenIndex,
- value: ?&Node,
+ value: ?*Node,
- pub fn iterate(self: &EnumTag, index: usize) ?&Node {
+ pub fn iterate(self: *EnumTag, index: usize) ?*Node {
var i = index;
if (self.value) |value| {
@@ -712,11 +712,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &EnumTag) TokenIndex {
+ pub fn firstToken(self: *EnumTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: &EnumTag) TokenIndex {
+ pub fn lastToken(self: *EnumTag) TokenIndex {
if (self.value) |value| {
return value.lastToken();
}
@@ -727,25 +727,25 @@ pub const Node = struct {
pub const ErrorTag = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
name_token: TokenIndex,
- pub fn iterate(self: &ErrorTag, index: usize) ?&Node {
+ pub fn iterate(self: *ErrorTag, index: usize) ?*Node {
var i = index;
if (self.doc_comments) |comments| {
- if (i < 1) return &comments.base;
+ if (i < 1) return &comments.base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &ErrorTag) TokenIndex {
+ pub fn firstToken(self: *ErrorTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: &ErrorTag) TokenIndex {
+ pub fn lastToken(self: *ErrorTag) TokenIndex {
return self.name_token;
}
};
@@ -754,15 +754,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &Identifier, index: usize) ?&Node {
+ pub fn iterate(self: *Identifier, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &Identifier) TokenIndex {
+ pub fn firstToken(self: *Identifier) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &Identifier) TokenIndex {
+ pub fn lastToken(self: *Identifier) TokenIndex {
return self.token;
}
};
@@ -770,10 +770,10 @@ pub const Node = struct {
pub const AsyncAttribute = struct {
base: Node,
async_token: TokenIndex,
- allocator_type: ?&Node,
+ allocator_type: ?*Node,
rangle_bracket: ?TokenIndex,
- pub fn iterate(self: &AsyncAttribute, index: usize) ?&Node {
+ pub fn iterate(self: *AsyncAttribute, index: usize) ?*Node {
var i = index;
if (self.allocator_type) |allocator_type| {
@@ -784,11 +784,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &AsyncAttribute) TokenIndex {
+ pub fn firstToken(self: *AsyncAttribute) TokenIndex {
return self.async_token;
}
- pub fn lastToken(self: &AsyncAttribute) TokenIndex {
+ pub fn lastToken(self: *AsyncAttribute) TokenIndex {
if (self.rangle_bracket) |rangle_bracket| {
return rangle_bracket;
}
@@ -799,7 +799,7 @@ pub const Node = struct {
pub const FnProto = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
fn_token: TokenIndex,
name_token: ?TokenIndex,
@@ -808,19 +808,19 @@ pub const Node = struct {
var_args_token: ?TokenIndex,
extern_export_inline_token: ?TokenIndex,
cc_token: ?TokenIndex,
- async_attr: ?&AsyncAttribute,
- body_node: ?&Node,
- lib_name: ?&Node, // populated if this is an extern declaration
- align_expr: ?&Node, // populated if align(A) is present
+ async_attr: ?*AsyncAttribute,
+ body_node: ?*Node,
+ lib_name: ?*Node, // populated if this is an extern declaration
+ align_expr: ?*Node, // populated if align(A) is present
- pub const ParamList = SegmentedList(&Node, 2);
+ pub const ParamList = SegmentedList(*Node, 2);
pub const ReturnType = union(enum) {
- Explicit: &Node,
- InferErrorSet: &Node,
+ Explicit: *Node,
+ InferErrorSet: *Node,
};
- pub fn iterate(self: &FnProto, index: usize) ?&Node {
+ pub fn iterate(self: *FnProto, index: usize) ?*Node {
var i = index;
if (self.lib_name) |lib_name| {
@@ -856,7 +856,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &FnProto) TokenIndex {
+ pub fn firstToken(self: *FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
assert(self.lib_name == null);
@@ -864,7 +864,7 @@ pub const Node = struct {
return self.fn_token;
}
- pub fn lastToken(self: &FnProto) TokenIndex {
+ pub fn lastToken(self: *FnProto) TokenIndex {
if (self.body_node) |body_node| return body_node.lastToken();
switch (self.return_type) {
// TODO allow this and next prong to share bodies since the types are the same
@@ -881,10 +881,10 @@ pub const Node = struct {
pub const Result = struct {
arrow_token: TokenIndex,
- return_type: &Node,
+ return_type: *Node,
};
- pub fn iterate(self: &PromiseType, index: usize) ?&Node {
+ pub fn iterate(self: *PromiseType, index: usize) ?*Node {
var i = index;
if (self.result) |result| {
@@ -895,11 +895,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PromiseType) TokenIndex {
+ pub fn firstToken(self: *PromiseType) TokenIndex {
return self.promise_token;
}
- pub fn lastToken(self: &PromiseType) TokenIndex {
+ pub fn lastToken(self: *PromiseType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken();
return self.promise_token;
}
@@ -910,10 +910,10 @@ pub const Node = struct {
comptime_token: ?TokenIndex,
noalias_token: ?TokenIndex,
name_token: ?TokenIndex,
- type_node: &Node,
+ type_node: *Node,
var_args_token: ?TokenIndex,
- pub fn iterate(self: &ParamDecl, index: usize) ?&Node {
+ pub fn iterate(self: *ParamDecl, index: usize) ?*Node {
var i = index;
if (i < 1) return self.type_node;
@@ -922,14 +922,14 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ParamDecl) TokenIndex {
+ pub fn firstToken(self: *ParamDecl) TokenIndex {
if (self.comptime_token) |comptime_token| return comptime_token;
if (self.noalias_token) |noalias_token| return noalias_token;
if (self.name_token) |name_token| return name_token;
return self.type_node.firstToken();
}
- pub fn lastToken(self: &ParamDecl) TokenIndex {
+ pub fn lastToken(self: *ParamDecl) TokenIndex {
if (self.var_args_token) |var_args_token| return var_args_token;
return self.type_node.lastToken();
}
@@ -944,7 +944,7 @@ pub const Node = struct {
pub const StatementList = Root.DeclList;
- pub fn iterate(self: &Block, index: usize) ?&Node {
+ pub fn iterate(self: *Block, index: usize) ?*Node {
var i = index;
if (i < self.statements.len) return self.statements.at(i).*;
@@ -953,7 +953,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Block) TokenIndex {
+ pub fn firstToken(self: *Block) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -961,7 +961,7 @@ pub const Node = struct {
return self.lbrace;
}
- pub fn lastToken(self: &Block) TokenIndex {
+ pub fn lastToken(self: *Block) TokenIndex {
return self.rbrace;
}
};
@@ -970,14 +970,14 @@ pub const Node = struct {
base: Node,
defer_token: TokenIndex,
kind: Kind,
- expr: &Node,
+ expr: *Node,
const Kind = enum {
Error,
Unconditional,
};
- pub fn iterate(self: &Defer, index: usize) ?&Node {
+ pub fn iterate(self: *Defer, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -986,22 +986,22 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Defer) TokenIndex {
+ pub fn firstToken(self: *Defer) TokenIndex {
return self.defer_token;
}
- pub fn lastToken(self: &Defer) TokenIndex {
+ pub fn lastToken(self: *Defer) TokenIndex {
return self.expr.lastToken();
}
};
pub const Comptime = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
comptime_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
- pub fn iterate(self: &Comptime, index: usize) ?&Node {
+ pub fn iterate(self: *Comptime, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1010,11 +1010,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Comptime) TokenIndex {
+ pub fn firstToken(self: *Comptime) TokenIndex {
return self.comptime_token;
}
- pub fn lastToken(self: &Comptime) TokenIndex {
+ pub fn lastToken(self: *Comptime) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1022,10 +1022,10 @@ pub const Node = struct {
pub const Payload = struct {
base: Node,
lpipe: TokenIndex,
- error_symbol: &Node,
+ error_symbol: *Node,
rpipe: TokenIndex,
- pub fn iterate(self: &Payload, index: usize) ?&Node {
+ pub fn iterate(self: *Payload, index: usize) ?*Node {
var i = index;
if (i < 1) return self.error_symbol;
@@ -1034,11 +1034,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Payload) TokenIndex {
+ pub fn firstToken(self: *Payload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: &Payload) TokenIndex {
+ pub fn lastToken(self: *Payload) TokenIndex {
return self.rpipe;
}
};
@@ -1047,10 +1047,10 @@ pub const Node = struct {
base: Node,
lpipe: TokenIndex,
ptr_token: ?TokenIndex,
- value_symbol: &Node,
+ value_symbol: *Node,
rpipe: TokenIndex,
- pub fn iterate(self: &PointerPayload, index: usize) ?&Node {
+ pub fn iterate(self: *PointerPayload, index: usize) ?*Node {
var i = index;
if (i < 1) return self.value_symbol;
@@ -1059,11 +1059,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PointerPayload) TokenIndex {
+ pub fn firstToken(self: *PointerPayload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: &PointerPayload) TokenIndex {
+ pub fn lastToken(self: *PointerPayload) TokenIndex {
return self.rpipe;
}
};
@@ -1072,11 +1072,11 @@ pub const Node = struct {
base: Node,
lpipe: TokenIndex,
ptr_token: ?TokenIndex,
- value_symbol: &Node,
- index_symbol: ?&Node,
+ value_symbol: *Node,
+ index_symbol: ?*Node,
rpipe: TokenIndex,
- pub fn iterate(self: &PointerIndexPayload, index: usize) ?&Node {
+ pub fn iterate(self: *PointerIndexPayload, index: usize) ?*Node {
var i = index;
if (i < 1) return self.value_symbol;
@@ -1090,11 +1090,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PointerIndexPayload) TokenIndex {
+ pub fn firstToken(self: *PointerIndexPayload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: &PointerIndexPayload) TokenIndex {
+ pub fn lastToken(self: *PointerIndexPayload) TokenIndex {
return self.rpipe;
}
};
@@ -1102,10 +1102,10 @@ pub const Node = struct {
pub const Else = struct {
base: Node,
else_token: TokenIndex,
- payload: ?&Node,
- body: &Node,
+ payload: ?*Node,
+ body: *Node,
- pub fn iterate(self: &Else, index: usize) ?&Node {
+ pub fn iterate(self: *Else, index: usize) ?*Node {
var i = index;
if (self.payload) |payload| {
@@ -1119,11 +1119,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Else) TokenIndex {
+ pub fn firstToken(self: *Else) TokenIndex {
return self.else_token;
}
- pub fn lastToken(self: &Else) TokenIndex {
+ pub fn lastToken(self: *Else) TokenIndex {
return self.body.lastToken();
}
};
@@ -1131,15 +1131,15 @@ pub const Node = struct {
pub const Switch = struct {
base: Node,
switch_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
/// these must be SwitchCase nodes
cases: CaseList,
rbrace: TokenIndex,
- pub const CaseList = SegmentedList(&Node, 2);
+ pub const CaseList = SegmentedList(*Node, 2);
- pub fn iterate(self: &Switch, index: usize) ?&Node {
+ pub fn iterate(self: *Switch, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1151,11 +1151,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Switch) TokenIndex {
+ pub fn firstToken(self: *Switch) TokenIndex {
return self.switch_token;
}
- pub fn lastToken(self: &Switch) TokenIndex {
+ pub fn lastToken(self: *Switch) TokenIndex {
return self.rbrace;
}
};
@@ -1164,12 +1164,12 @@ pub const Node = struct {
base: Node,
items: ItemList,
arrow_token: TokenIndex,
- payload: ?&Node,
- expr: &Node,
+ payload: ?*Node,
+ expr: *Node,
- pub const ItemList = SegmentedList(&Node, 1);
+ pub const ItemList = SegmentedList(*Node, 1);
- pub fn iterate(self: &SwitchCase, index: usize) ?&Node {
+ pub fn iterate(self: *SwitchCase, index: usize) ?*Node {
var i = index;
if (i < self.items.len) return self.items.at(i).*;
@@ -1186,11 +1186,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &SwitchCase) TokenIndex {
+ pub fn firstToken(self: *SwitchCase) TokenIndex {
return (self.items.at(0).*).firstToken();
}
- pub fn lastToken(self: &SwitchCase) TokenIndex {
+ pub fn lastToken(self: *SwitchCase) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1199,15 +1199,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &SwitchElse, index: usize) ?&Node {
+ pub fn iterate(self: *SwitchElse, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &SwitchElse) TokenIndex {
+ pub fn firstToken(self: *SwitchElse) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &SwitchElse) TokenIndex {
+ pub fn lastToken(self: *SwitchElse) TokenIndex {
return self.token;
}
};
@@ -1217,13 +1217,13 @@ pub const Node = struct {
label: ?TokenIndex,
inline_token: ?TokenIndex,
while_token: TokenIndex,
- condition: &Node,
- payload: ?&Node,
- continue_expr: ?&Node,
- body: &Node,
- @"else": ?&Else,
+ condition: *Node,
+ payload: ?*Node,
+ continue_expr: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
- pub fn iterate(self: &While, index: usize) ?&Node {
+ pub fn iterate(self: *While, index: usize) ?*Node {
var i = index;
if (i < 1) return self.condition;
@@ -1243,14 +1243,14 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return &@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &While) TokenIndex {
+ pub fn firstToken(self: *While) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -1262,7 +1262,7 @@ pub const Node = struct {
return self.while_token;
}
- pub fn lastToken(self: &While) TokenIndex {
+ pub fn lastToken(self: *While) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1276,12 +1276,12 @@ pub const Node = struct {
label: ?TokenIndex,
inline_token: ?TokenIndex,
for_token: TokenIndex,
- array_expr: &Node,
- payload: ?&Node,
- body: &Node,
- @"else": ?&Else,
+ array_expr: *Node,
+ payload: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
- pub fn iterate(self: &For, index: usize) ?&Node {
+ pub fn iterate(self: *For, index: usize) ?*Node {
var i = index;
if (i < 1) return self.array_expr;
@@ -1296,14 +1296,14 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return &@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &For) TokenIndex {
+ pub fn firstToken(self: *For) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -1315,7 +1315,7 @@ pub const Node = struct {
return self.for_token;
}
- pub fn lastToken(self: &For) TokenIndex {
+ pub fn lastToken(self: *For) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1327,12 +1327,12 @@ pub const Node = struct {
pub const If = struct {
base: Node,
if_token: TokenIndex,
- condition: &Node,
- payload: ?&Node,
- body: &Node,
- @"else": ?&Else,
+ condition: *Node,
+ payload: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
- pub fn iterate(self: &If, index: usize) ?&Node {
+ pub fn iterate(self: *If, index: usize) ?*Node {
var i = index;
if (i < 1) return self.condition;
@@ -1347,18 +1347,18 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return &@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &If) TokenIndex {
+ pub fn firstToken(self: *If) TokenIndex {
return self.if_token;
}
- pub fn lastToken(self: &If) TokenIndex {
+ pub fn lastToken(self: *If) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1370,9 +1370,9 @@ pub const Node = struct {
pub const InfixOp = struct {
base: Node,
op_token: TokenIndex,
- lhs: &Node,
+ lhs: *Node,
op: Op,
- rhs: &Node,
+ rhs: *Node,
pub const Op = union(enum) {
Add,
@@ -1401,7 +1401,7 @@ pub const Node = struct {
BitXor,
BoolAnd,
BoolOr,
- Catch: ?&Node,
+ Catch: ?*Node,
Div,
EqualEqual,
ErrorUnion,
@@ -1420,7 +1420,7 @@ pub const Node = struct {
UnwrapMaybe,
};
- pub fn iterate(self: &InfixOp, index: usize) ?&Node {
+ pub fn iterate(self: *InfixOp, index: usize) ?*Node {
var i = index;
if (i < 1) return self.lhs;
@@ -1485,11 +1485,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &InfixOp) TokenIndex {
+ pub fn firstToken(self: *InfixOp) TokenIndex {
return self.lhs.firstToken();
}
- pub fn lastToken(self: &InfixOp) TokenIndex {
+ pub fn lastToken(self: *InfixOp) TokenIndex {
return self.rhs.lastToken();
}
};
@@ -1498,11 +1498,11 @@ pub const Node = struct {
base: Node,
op_token: TokenIndex,
op: Op,
- rhs: &Node,
+ rhs: *Node,
pub const Op = union(enum) {
AddrOf: AddrOfInfo,
- ArrayType: &Node,
+ ArrayType: *Node,
Await,
BitNot,
BoolNot,
@@ -1523,17 +1523,17 @@ pub const Node = struct {
volatile_token: ?TokenIndex,
pub const Align = struct {
- node: &Node,
+ node: *Node,
bit_range: ?BitRange,
pub const BitRange = struct {
- start: &Node,
- end: &Node,
+ start: *Node,
+ end: *Node,
};
};
};
- pub fn iterate(self: &PrefixOp, index: usize) ?&Node {
+ pub fn iterate(self: *PrefixOp, index: usize) ?*Node {
var i = index;
switch (self.op) {
@@ -1573,11 +1573,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PrefixOp) TokenIndex {
+ pub fn firstToken(self: *PrefixOp) TokenIndex {
return self.op_token;
}
- pub fn lastToken(self: &PrefixOp) TokenIndex {
+ pub fn lastToken(self: *PrefixOp) TokenIndex {
return self.rhs.lastToken();
}
};
@@ -1586,9 +1586,9 @@ pub const Node = struct {
base: Node,
period_token: TokenIndex,
name_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
- pub fn iterate(self: &FieldInitializer, index: usize) ?&Node {
+ pub fn iterate(self: *FieldInitializer, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1597,45 +1597,45 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &FieldInitializer) TokenIndex {
+ pub fn firstToken(self: *FieldInitializer) TokenIndex {
return self.period_token;
}
- pub fn lastToken(self: &FieldInitializer) TokenIndex {
+ pub fn lastToken(self: *FieldInitializer) TokenIndex {
return self.expr.lastToken();
}
};
pub const SuffixOp = struct {
base: Node,
- lhs: &Node,
+ lhs: *Node,
op: Op,
rtoken: TokenIndex,
pub const Op = union(enum) {
Call: Call,
- ArrayAccess: &Node,
+ ArrayAccess: *Node,
Slice: Slice,
ArrayInitializer: InitList,
StructInitializer: InitList,
Deref,
- pub const InitList = SegmentedList(&Node, 2);
+ pub const InitList = SegmentedList(*Node, 2);
pub const Call = struct {
params: ParamList,
- async_attr: ?&AsyncAttribute,
+ async_attr: ?*AsyncAttribute,
- pub const ParamList = SegmentedList(&Node, 2);
+ pub const ParamList = SegmentedList(*Node, 2);
};
pub const Slice = struct {
- start: &Node,
- end: ?&Node,
+ start: *Node,
+ end: ?*Node,
};
};
- pub fn iterate(self: &SuffixOp, index: usize) ?&Node {
+ pub fn iterate(self: *SuffixOp, index: usize) ?*Node {
var i = index;
if (i < 1) return self.lhs;
@@ -1673,7 +1673,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &SuffixOp) TokenIndex {
+ pub fn firstToken(self: *SuffixOp) TokenIndex {
switch (self.op) {
@TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
else => {},
@@ -1681,7 +1681,7 @@ pub const Node = struct {
return self.lhs.firstToken();
}
- pub fn lastToken(self: &SuffixOp) TokenIndex {
+ pub fn lastToken(self: *SuffixOp) TokenIndex {
return self.rtoken;
}
};
@@ -1689,10 +1689,10 @@ pub const Node = struct {
pub const GroupedExpression = struct {
base: Node,
lparen: TokenIndex,
- expr: &Node,
+ expr: *Node,
rparen: TokenIndex,
- pub fn iterate(self: &GroupedExpression, index: usize) ?&Node {
+ pub fn iterate(self: *GroupedExpression, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1701,11 +1701,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &GroupedExpression) TokenIndex {
+ pub fn firstToken(self: *GroupedExpression) TokenIndex {
return self.lparen;
}
- pub fn lastToken(self: &GroupedExpression) TokenIndex {
+ pub fn lastToken(self: *GroupedExpression) TokenIndex {
return self.rparen;
}
};
@@ -1714,15 +1714,15 @@ pub const Node = struct {
base: Node,
ltoken: TokenIndex,
kind: Kind,
- rhs: ?&Node,
+ rhs: ?*Node,
const Kind = union(enum) {
- Break: ?&Node,
- Continue: ?&Node,
+ Break: ?*Node,
+ Continue: ?*Node,
Return,
};
- pub fn iterate(self: &ControlFlowExpression, index: usize) ?&Node {
+ pub fn iterate(self: *ControlFlowExpression, index: usize) ?*Node {
var i = index;
switch (self.kind) {
@@ -1749,11 +1749,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ControlFlowExpression) TokenIndex {
+ pub fn firstToken(self: *ControlFlowExpression) TokenIndex {
return self.ltoken;
}
- pub fn lastToken(self: &ControlFlowExpression) TokenIndex {
+ pub fn lastToken(self: *ControlFlowExpression) TokenIndex {
if (self.rhs) |rhs| {
return rhs.lastToken();
}
@@ -1780,10 +1780,10 @@ pub const Node = struct {
base: Node,
label: ?TokenIndex,
suspend_token: TokenIndex,
- payload: ?&Node,
- body: ?&Node,
+ payload: ?*Node,
+ body: ?*Node,
- pub fn iterate(self: &Suspend, index: usize) ?&Node {
+ pub fn iterate(self: *Suspend, index: usize) ?*Node {
var i = index;
if (self.payload) |payload| {
@@ -1799,12 +1799,12 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Suspend) TokenIndex {
+ pub fn firstToken(self: *Suspend) TokenIndex {
if (self.label) |label| return label;
return self.suspend_token;
}
- pub fn lastToken(self: &Suspend) TokenIndex {
+ pub fn lastToken(self: *Suspend) TokenIndex {
if (self.body) |body| {
return body.lastToken();
}
@@ -1821,15 +1821,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &IntegerLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *IntegerLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &IntegerLiteral) TokenIndex {
+ pub fn firstToken(self: *IntegerLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &IntegerLiteral) TokenIndex {
+ pub fn lastToken(self: *IntegerLiteral) TokenIndex {
return self.token;
}
};
@@ -1838,15 +1838,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &FloatLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *FloatLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &FloatLiteral) TokenIndex {
+ pub fn firstToken(self: *FloatLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &FloatLiteral) TokenIndex {
+ pub fn lastToken(self: *FloatLiteral) TokenIndex {
return self.token;
}
};
@@ -1857,9 +1857,9 @@ pub const Node = struct {
params: ParamList,
rparen_token: TokenIndex,
- pub const ParamList = SegmentedList(&Node, 2);
+ pub const ParamList = SegmentedList(*Node, 2);
- pub fn iterate(self: &BuiltinCall, index: usize) ?&Node {
+ pub fn iterate(self: *BuiltinCall, index: usize) ?*Node {
var i = index;
if (i < self.params.len) return self.params.at(i).*;
@@ -1868,11 +1868,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &BuiltinCall) TokenIndex {
+ pub fn firstToken(self: *BuiltinCall) TokenIndex {
return self.builtin_token;
}
- pub fn lastToken(self: &BuiltinCall) TokenIndex {
+ pub fn lastToken(self: *BuiltinCall) TokenIndex {
return self.rparen_token;
}
};
@@ -1881,15 +1881,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &StringLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *StringLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &StringLiteral) TokenIndex {
+ pub fn firstToken(self: *StringLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &StringLiteral) TokenIndex {
+ pub fn lastToken(self: *StringLiteral) TokenIndex {
return self.token;
}
};
@@ -1900,15 +1900,15 @@ pub const Node = struct {
pub const LineList = SegmentedList(TokenIndex, 4);
- pub fn iterate(self: &MultilineStringLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *MultilineStringLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &MultilineStringLiteral) TokenIndex {
+ pub fn firstToken(self: *MultilineStringLiteral) TokenIndex {
return self.lines.at(0).*;
}
- pub fn lastToken(self: &MultilineStringLiteral) TokenIndex {
+ pub fn lastToken(self: *MultilineStringLiteral) TokenIndex {
return self.lines.at(self.lines.len - 1).*;
}
};
@@ -1917,15 +1917,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &CharLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *CharLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &CharLiteral) TokenIndex {
+ pub fn firstToken(self: *CharLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &CharLiteral) TokenIndex {
+ pub fn lastToken(self: *CharLiteral) TokenIndex {
return self.token;
}
};
@@ -1934,15 +1934,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &BoolLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *BoolLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &BoolLiteral) TokenIndex {
+ pub fn firstToken(self: *BoolLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &BoolLiteral) TokenIndex {
+ pub fn lastToken(self: *BoolLiteral) TokenIndex {
return self.token;
}
};
@@ -1951,15 +1951,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &NullLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *NullLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &NullLiteral) TokenIndex {
+ pub fn firstToken(self: *NullLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &NullLiteral) TokenIndex {
+ pub fn lastToken(self: *NullLiteral) TokenIndex {
return self.token;
}
};
@@ -1968,15 +1968,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &UndefinedLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *UndefinedLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &UndefinedLiteral) TokenIndex {
+ pub fn firstToken(self: *UndefinedLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &UndefinedLiteral) TokenIndex {
+ pub fn lastToken(self: *UndefinedLiteral) TokenIndex {
return self.token;
}
};
@@ -1985,15 +1985,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &ThisLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *ThisLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &ThisLiteral) TokenIndex {
+ pub fn firstToken(self: *ThisLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &ThisLiteral) TokenIndex {
+ pub fn lastToken(self: *ThisLiteral) TokenIndex {
return self.token;
}
};
@@ -2001,17 +2001,17 @@ pub const Node = struct {
pub const AsmOutput = struct {
base: Node,
lbracket: TokenIndex,
- symbolic_name: &Node,
- constraint: &Node,
+ symbolic_name: *Node,
+ constraint: *Node,
kind: Kind,
rparen: TokenIndex,
const Kind = union(enum) {
- Variable: &Identifier,
- Return: &Node,
+ Variable: *Identifier,
+ Return: *Node,
};
- pub fn iterate(self: &AsmOutput, index: usize) ?&Node {
+ pub fn iterate(self: *AsmOutput, index: usize) ?*Node {
var i = index;
if (i < 1) return self.symbolic_name;
@@ -2022,7 +2022,7 @@ pub const Node = struct {
switch (self.kind) {
Kind.Variable => |variable_name| {
- if (i < 1) return &variable_name.base;
+ if (i < 1) return &variable_name.base;
i -= 1;
},
Kind.Return => |return_type| {
@@ -2034,11 +2034,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &AsmOutput) TokenIndex {
+ pub fn firstToken(self: *AsmOutput) TokenIndex {
return self.lbracket;
}
- pub fn lastToken(self: &AsmOutput) TokenIndex {
+ pub fn lastToken(self: *AsmOutput) TokenIndex {
return self.rparen;
}
};
@@ -2046,12 +2046,12 @@ pub const Node = struct {
pub const AsmInput = struct {
base: Node,
lbracket: TokenIndex,
- symbolic_name: &Node,
- constraint: &Node,
- expr: &Node,
+ symbolic_name: *Node,
+ constraint: *Node,
+ expr: *Node,
rparen: TokenIndex,
- pub fn iterate(self: &AsmInput, index: usize) ?&Node {
+ pub fn iterate(self: *AsmInput, index: usize) ?*Node {
var i = index;
if (i < 1) return self.symbolic_name;
@@ -2066,11 +2066,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &AsmInput) TokenIndex {
+ pub fn firstToken(self: *AsmInput) TokenIndex {
return self.lbracket;
}
- pub fn lastToken(self: &AsmInput) TokenIndex {
+ pub fn lastToken(self: *AsmInput) TokenIndex {
return self.rparen;
}
};
@@ -2079,33 +2079,33 @@ pub const Node = struct {
base: Node,
asm_token: TokenIndex,
volatile_token: ?TokenIndex,
- template: &Node,
+ template: *Node,
outputs: OutputList,
inputs: InputList,
clobbers: ClobberList,
rparen: TokenIndex,
- const OutputList = SegmentedList(&AsmOutput, 2);
- const InputList = SegmentedList(&AsmInput, 2);
+ const OutputList = SegmentedList(*AsmOutput, 2);
+ const InputList = SegmentedList(*AsmInput, 2);
const ClobberList = SegmentedList(TokenIndex, 2);
- pub fn iterate(self: &Asm, index: usize) ?&Node {
+ pub fn iterate(self: *Asm, index: usize) ?*Node {
var i = index;
- if (i < self.outputs.len) return &(self.outputs.at(index).*).base;
+ if (i < self.outputs.len) return &(self.outputs.at(index).*).base;
i -= self.outputs.len;
- if (i < self.inputs.len) return &(self.inputs.at(index).*).base;
+ if (i < self.inputs.len) return &(self.inputs.at(index).*).base;
i -= self.inputs.len;
return null;
}
- pub fn firstToken(self: &Asm) TokenIndex {
+ pub fn firstToken(self: *Asm) TokenIndex {
return self.asm_token;
}
- pub fn lastToken(self: &Asm) TokenIndex {
+ pub fn lastToken(self: *Asm) TokenIndex {
return self.rparen;
}
};
@@ -2114,15 +2114,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &Unreachable, index: usize) ?&Node {
+ pub fn iterate(self: *Unreachable, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &Unreachable) TokenIndex {
+ pub fn firstToken(self: *Unreachable) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &Unreachable) TokenIndex {
+ pub fn lastToken(self: *Unreachable) TokenIndex {
return self.token;
}
};
@@ -2131,15 +2131,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &ErrorType, index: usize) ?&Node {
+ pub fn iterate(self: *ErrorType, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &ErrorType) TokenIndex {
+ pub fn firstToken(self: *ErrorType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &ErrorType) TokenIndex {
+ pub fn lastToken(self: *ErrorType) TokenIndex {
return self.token;
}
};
@@ -2148,15 +2148,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &VarType, index: usize) ?&Node {
+ pub fn iterate(self: *VarType, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &VarType) TokenIndex {
+ pub fn firstToken(self: *VarType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &VarType) TokenIndex {
+ pub fn lastToken(self: *VarType) TokenIndex {
return self.token;
}
};
@@ -2167,27 +2167,27 @@ pub const Node = struct {
pub const LineList = SegmentedList(TokenIndex, 4);
- pub fn iterate(self: &DocComment, index: usize) ?&Node {
+ pub fn iterate(self: *DocComment, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &DocComment) TokenIndex {
+ pub fn firstToken(self: *DocComment) TokenIndex {
return self.lines.at(0).*;
}
- pub fn lastToken(self: &DocComment) TokenIndex {
+ pub fn lastToken(self: *DocComment) TokenIndex {
return self.lines.at(self.lines.len - 1).*;
}
};
pub const TestDecl = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
test_token: TokenIndex,
- name: &Node,
- body_node: &Node,
+ name: *Node,
+ body_node: *Node,
- pub fn iterate(self: &TestDecl, index: usize) ?&Node {
+ pub fn iterate(self: *TestDecl, index: usize) ?*Node {
var i = index;
if (i < 1) return self.body_node;
@@ -2196,11 +2196,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &TestDecl) TokenIndex {
+ pub fn firstToken(self: *TestDecl) TokenIndex {
return self.test_token;
}
- pub fn lastToken(self: &TestDecl) TokenIndex {
+ pub fn lastToken(self: *TestDecl) TokenIndex {
return self.body_node.lastToken();
}
};
diff --git a/std/zig/bench.zig b/std/zig/bench.zig
index c3b6b0d3d3..59392889a6 100644
--- a/std/zig/bench.zig
+++ b/std/zig/bench.zig
@@ -24,15 +24,15 @@ pub fn main() !void {
const mb_per_sec = bytes_per_sec / (1024 * 1024);
var stdout_file = try std.io.getStdOut();
- const stdout = &std.io.FileOutStream.init(&stdout_file).stream;
+ const stdout = &std.io.FileOutStream.init(&stdout_file).stream;
try stdout.print("{.3} MB/s, {} KB used \n", mb_per_sec, memory_used / 1024);
}
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var allocator = &fixed_buf_alloc.allocator;
+ var allocator = &fixed_buf_alloc.allocator;
var tokenizer = Tokenizer.init(source);
- var parser = Parser.init(&tokenizer, allocator, "(memory buffer)");
+ var parser = Parser.init(&tokenizer, allocator, "(memory buffer)");
_ = parser.parse() catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 05554f5d34..6d29300aed 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -9,7 +9,7 @@ const Error = ast.Error;
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
-pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
+pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
var tree_arena = std.heap.ArenaAllocator.init(allocator);
errdefer tree_arena.deinit();
@@ -2754,16 +2754,16 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
}
const AnnotatedToken = struct {
- ptr: &Token,
+ ptr: *Token,
index: TokenIndex,
};
const TopLevelDeclCtx = struct {
- decls: &ast.Node.Root.DeclList,
+ decls: *ast.Node.Root.DeclList,
visib_token: ?TokenIndex,
extern_export_inline_token: ?AnnotatedToken,
- lib_name: ?&ast.Node,
- comments: ?&ast.Node.DocComment,
+ lib_name: ?*ast.Node,
+ comments: ?*ast.Node.DocComment,
};
const VarDeclCtx = struct {
@@ -2771,21 +2771,21 @@ const VarDeclCtx = struct {
visib_token: ?TokenIndex,
comptime_token: ?TokenIndex,
extern_export_token: ?TokenIndex,
- lib_name: ?&ast.Node,
- list: &ast.Node.Root.DeclList,
- comments: ?&ast.Node.DocComment,
+ lib_name: ?*ast.Node,
+ list: *ast.Node.Root.DeclList,
+ comments: ?*ast.Node.DocComment,
};
const TopLevelExternOrFieldCtx = struct {
visib_token: TokenIndex,
- container_decl: &ast.Node.ContainerDecl,
- comments: ?&ast.Node.DocComment,
+ container_decl: *ast.Node.ContainerDecl,
+ comments: ?*ast.Node.DocComment,
};
const ExternTypeCtx = struct {
opt_ctx: OptionalCtx,
extern_token: TokenIndex,
- comments: ?&ast.Node.DocComment,
+ comments: ?*ast.Node.DocComment,
};
const ContainerKindCtx = struct {
@@ -2795,24 +2795,24 @@ const ContainerKindCtx = struct {
const ExpectTokenSave = struct {
id: @TagType(Token.Id),
- ptr: &TokenIndex,
+ ptr: *TokenIndex,
};
const OptionalTokenSave = struct {
id: @TagType(Token.Id),
- ptr: &?TokenIndex,
+ ptr: *?TokenIndex,
};
const ExprListCtx = struct {
- list: &ast.Node.SuffixOp.Op.InitList,
+ list: *ast.Node.SuffixOp.Op.InitList,
end: Token.Id,
- ptr: &TokenIndex,
+ ptr: *TokenIndex,
};
fn ListSave(comptime List: type) type {
return struct {
- list: &List,
- ptr: &TokenIndex,
+ list: *List,
+ ptr: *TokenIndex,
};
}
@@ -2841,7 +2841,7 @@ const LoopCtx = struct {
const AsyncEndCtx = struct {
ctx: OptionalCtx,
- attribute: &ast.Node.AsyncAttribute,
+ attribute: *ast.Node.AsyncAttribute,
};
const ErrorTypeOrSetDeclCtx = struct {
@@ -2850,21 +2850,21 @@ const ErrorTypeOrSetDeclCtx = struct {
};
const ParamDeclEndCtx = struct {
- fn_proto: &ast.Node.FnProto,
- param_decl: &ast.Node.ParamDecl,
+ fn_proto: *ast.Node.FnProto,
+ param_decl: *ast.Node.ParamDecl,
};
const ComptimeStatementCtx = struct {
comptime_token: TokenIndex,
- block: &ast.Node.Block,
+ block: *ast.Node.Block,
};
const OptionalCtx = union(enum) {
- Optional: &?&ast.Node,
- RequiredNull: &?&ast.Node,
- Required: &&ast.Node,
+ Optional: *?*ast.Node,
+ RequiredNull: *?*ast.Node,
+ Required: **ast.Node,
- pub fn store(self: &const OptionalCtx, value: &ast.Node) void {
+ pub fn store(self: *const OptionalCtx, value: *ast.Node) void {
switch (self.*) {
OptionalCtx.Optional => |ptr| ptr.* = value,
OptionalCtx.RequiredNull => |ptr| ptr.* = value,
@@ -2872,7 +2872,7 @@ const OptionalCtx = union(enum) {
}
}
- pub fn get(self: &const OptionalCtx) ?&ast.Node {
+ pub fn get(self: *const OptionalCtx) ?*ast.Node {
switch (self.*) {
OptionalCtx.Optional => |ptr| return ptr.*,
OptionalCtx.RequiredNull => |ptr| return ??ptr.*,
@@ -2880,7 +2880,7 @@ const OptionalCtx = union(enum) {
}
}
- pub fn toRequired(self: &const OptionalCtx) OptionalCtx {
+ pub fn toRequired(self: *const OptionalCtx) OptionalCtx {
switch (self.*) {
OptionalCtx.Optional => |ptr| {
return OptionalCtx{ .RequiredNull = ptr };
@@ -2892,8 +2892,8 @@ const OptionalCtx = union(enum) {
};
const AddCommentsCtx = struct {
- node_ptr: &&ast.Node,
- comments: ?&ast.Node.DocComment,
+ node_ptr: **ast.Node,
+ comments: ?*ast.Node.DocComment,
};
const State = union(enum) {
@@ -2904,67 +2904,67 @@ const State = union(enum) {
TopLevelExternOrField: TopLevelExternOrFieldCtx,
ContainerKind: ContainerKindCtx,
- ContainerInitArgStart: &ast.Node.ContainerDecl,
- ContainerInitArg: &ast.Node.ContainerDecl,
- ContainerDecl: &ast.Node.ContainerDecl,
+ ContainerInitArgStart: *ast.Node.ContainerDecl,
+ ContainerInitArg: *ast.Node.ContainerDecl,
+ ContainerDecl: *ast.Node.ContainerDecl,
VarDecl: VarDeclCtx,
- VarDeclAlign: &ast.Node.VarDecl,
- VarDeclEq: &ast.Node.VarDecl,
- VarDeclSemiColon: &ast.Node.VarDecl,
-
- FnDef: &ast.Node.FnProto,
- FnProto: &ast.Node.FnProto,
- FnProtoAlign: &ast.Node.FnProto,
- FnProtoReturnType: &ast.Node.FnProto,
-
- ParamDecl: &ast.Node.FnProto,
- ParamDeclAliasOrComptime: &ast.Node.ParamDecl,
- ParamDeclName: &ast.Node.ParamDecl,
+ VarDeclAlign: *ast.Node.VarDecl,
+ VarDeclEq: *ast.Node.VarDecl,
+ VarDeclSemiColon: *ast.Node.VarDecl,
+
+ FnDef: *ast.Node.FnProto,
+ FnProto: *ast.Node.FnProto,
+ FnProtoAlign: *ast.Node.FnProto,
+ FnProtoReturnType: *ast.Node.FnProto,
+
+ ParamDecl: *ast.Node.FnProto,
+ ParamDeclAliasOrComptime: *ast.Node.ParamDecl,
+ ParamDeclName: *ast.Node.ParamDecl,
ParamDeclEnd: ParamDeclEndCtx,
- ParamDeclComma: &ast.Node.FnProto,
+ ParamDeclComma: *ast.Node.FnProto,
MaybeLabeledExpression: MaybeLabeledExpressionCtx,
LabeledExpression: LabelCtx,
Inline: InlineCtx,
While: LoopCtx,
- WhileContinueExpr: &?&ast.Node,
+ WhileContinueExpr: *?*ast.Node,
For: LoopCtx,
- Else: &?&ast.Node.Else,
+ Else: *?*ast.Node.Else,
- Block: &ast.Node.Block,
- Statement: &ast.Node.Block,
+ Block: *ast.Node.Block,
+ Statement: *ast.Node.Block,
ComptimeStatement: ComptimeStatementCtx,
- Semicolon: &&ast.Node,
+ Semicolon: **ast.Node,
- AsmOutputItems: &ast.Node.Asm.OutputList,
- AsmOutputReturnOrType: &ast.Node.AsmOutput,
- AsmInputItems: &ast.Node.Asm.InputList,
- AsmClobberItems: &ast.Node.Asm.ClobberList,
+ AsmOutputItems: *ast.Node.Asm.OutputList,
+ AsmOutputReturnOrType: *ast.Node.AsmOutput,
+ AsmInputItems: *ast.Node.Asm.InputList,
+ AsmClobberItems: *ast.Node.Asm.ClobberList,
ExprListItemOrEnd: ExprListCtx,
ExprListCommaOrEnd: ExprListCtx,
FieldInitListItemOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList),
FieldInitListCommaOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList),
- FieldListCommaOrEnd: &ast.Node.ContainerDecl,
+ FieldListCommaOrEnd: *ast.Node.ContainerDecl,
FieldInitValue: OptionalCtx,
ErrorTagListItemOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList),
ErrorTagListCommaOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList),
SwitchCaseOrEnd: ListSave(ast.Node.Switch.CaseList),
SwitchCaseCommaOrEnd: ListSave(ast.Node.Switch.CaseList),
- SwitchCaseFirstItem: &ast.Node.SwitchCase,
- SwitchCaseItemCommaOrEnd: &ast.Node.SwitchCase,
- SwitchCaseItemOrEnd: &ast.Node.SwitchCase,
+ SwitchCaseFirstItem: *ast.Node.SwitchCase,
+ SwitchCaseItemCommaOrEnd: *ast.Node.SwitchCase,
+ SwitchCaseItemOrEnd: *ast.Node.SwitchCase,
- SuspendBody: &ast.Node.Suspend,
- AsyncAllocator: &ast.Node.AsyncAttribute,
+ SuspendBody: *ast.Node.Suspend,
+ AsyncAllocator: *ast.Node.AsyncAttribute,
AsyncEnd: AsyncEndCtx,
ExternType: ExternTypeCtx,
- SliceOrArrayAccess: &ast.Node.SuffixOp,
- SliceOrArrayType: &ast.Node.PrefixOp,
- AddrOfModifiers: &ast.Node.PrefixOp.AddrOfInfo,
- AlignBitRange: &ast.Node.PrefixOp.AddrOfInfo.Align,
+ SliceOrArrayAccess: *ast.Node.SuffixOp,
+ SliceOrArrayType: *ast.Node.PrefixOp,
+ AddrOfModifiers: *ast.Node.PrefixOp.AddrOfInfo,
+ AlignBitRange: *ast.Node.PrefixOp.AddrOfInfo.Align,
Payload: OptionalCtx,
PointerPayload: OptionalCtx,
@@ -3007,7 +3007,7 @@ const State = union(enum) {
ErrorTypeOrSetDecl: ErrorTypeOrSetDeclCtx,
StringLiteral: OptionalCtx,
Identifier: OptionalCtx,
- ErrorTag: &&ast.Node,
+ ErrorTag: **ast.Node,
IfToken: @TagType(Token.Id),
IfTokenSave: ExpectTokenSave,
@@ -3016,7 +3016,7 @@ const State = union(enum) {
OptionalTokenSave: OptionalTokenSave,
};
-fn pushDocComment(arena: &mem.Allocator, line_comment: TokenIndex, result: &?&ast.Node.DocComment) !void {
+fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*ast.Node.DocComment) !void {
const node = blk: {
if (result.*) |comment_node| {
break :blk comment_node;
@@ -3032,8 +3032,8 @@ fn pushDocComment(arena: &mem.Allocator, line_comment: TokenIndex, result: &?&as
try node.lines.push(line_comment);
}
-fn eatDocComments(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) !?&ast.Node.DocComment {
- var result: ?&ast.Node.DocComment = null;
+fn eatDocComments(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) !?*ast.Node.DocComment {
+ var result: ?*ast.Node.DocComment = null;
while (true) {
if (eatToken(tok_it, tree, Token.Id.DocComment)) |line_comment| {
try pushDocComment(arena, line_comment, &result);
@@ -3044,7 +3044,7 @@ fn eatDocComments(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, t
return result;
}
-fn parseStringLiteral(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, token_ptr: &const Token, token_index: TokenIndex, tree: &ast.Tree) !?&ast.Node {
+fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, token_ptr: *const Token, token_index: TokenIndex, tree: *ast.Tree) !?*ast.Node {
switch (token_ptr.id) {
Token.Id.StringLiteral => {
return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base;
@@ -3071,11 +3071,11 @@ fn parseStringLiteral(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterato
},
// TODO: We shouldn't need a cast, but:
// zig: /home/jc/Documents/zig/src/ir.cpp:7962: TypeTableEntry* ir_resolve_peer_types(IrAnalyze*, AstNode*, IrInstruction**, size_t): Assertion `err_set_type != nullptr' failed.
- else => return (?&ast.Node)(null),
+ else => return (?*ast.Node)(null),
}
}
-fn parseBlockExpr(stack: &std.ArrayList(State), arena: &mem.Allocator, ctx: &const OptionalCtx, token_ptr: &const Token, token_index: TokenIndex) !bool {
+fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool {
switch (token_ptr.id) {
Token.Id.Keyword_suspend => {
const node = try arena.construct(ast.Node.Suspend{
@@ -3189,7 +3189,7 @@ const ExpectCommaOrEndResult = union(enum) {
parse_error: Error,
};
-fn expectCommaOrEnd(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult {
+fn expectCommaOrEnd(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult {
const token = nextToken(tok_it, tree);
const token_index = token.index;
const token_ptr = token.ptr;
@@ -3212,7 +3212,7 @@ fn expectCommaOrEnd(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, end:
}
}
-fn tokenIdToAssignment(id: &const Token.Id) ?ast.Node.InfixOp.Op {
+fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op {
// TODO: We have to cast all cases because of this:
// error: expected type '?InfixOp', found '?@TagType(InfixOp)'
return switch (id.*) {
@@ -3307,21 +3307,21 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
};
}
-fn createLiteral(arena: &mem.Allocator, comptime T: type, token_index: TokenIndex) !&T {
+fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T {
return arena.construct(T{
.base = ast.Node{ .id = ast.Node.typeToId(T) },
.token = token_index,
});
}
-fn createToCtxLiteral(arena: &mem.Allocator, opt_ctx: &const OptionalCtx, comptime T: type, token_index: TokenIndex) !&T {
+fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, comptime T: type, token_index: TokenIndex) !*T {
const node = try createLiteral(arena, T, token_index);
opt_ctx.store(&node.base);
return node;
}
-fn eatToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
+fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
const token = ??tok_it.peek();
if (token.id == id) {
@@ -3331,7 +3331,7 @@ fn eatToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, id: @TagType(
return null;
}
-fn nextToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) AnnotatedToken {
+fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken {
const result = AnnotatedToken{
.index = tok_it.index,
.ptr = ??tok_it.next(),
@@ -3345,7 +3345,7 @@ fn nextToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) AnnotatedTok
}
}
-fn prevToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) void {
+fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void {
while (true) {
const prev_tok = tok_it.prev() ?? return;
if (prev_tok.id == Token.Id.LineComment) continue;
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 69903bc3fd..8507470bcc 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -1803,7 +1803,7 @@ const io = std.io;
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn testParse(source: []const u8, allocator: &mem.Allocator, anything_changed: &bool) ![]u8 {
+fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
var stderr_file = try io.getStdErr();
var stderr = &io.FileOutStream.init(&stderr_file).stream;
diff --git a/std/zig/render.zig b/std/zig/render.zig
index ac07917ff1..07e01241b7 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -13,7 +13,7 @@ pub const Error = error{
};
/// Returns whether anything changed
-pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(stream).Child.Error || Error)!bool {
+pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(stream).Child.Error || Error)!bool {
comptime assert(@typeId(@typeOf(stream)) == builtin.TypeId.Pointer);
var anything_changed: bool = false;
@@ -24,13 +24,13 @@ pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(
const StreamError = @typeOf(stream).Child.Error;
const Stream = std.io.OutStream(StreamError);
- anything_changed_ptr: &bool,
+ anything_changed_ptr: *bool,
child_stream: @typeOf(stream),
stream: Stream,
source_index: usize,
source: []const u8,
- fn write(iface_stream: &Stream, bytes: []const u8) StreamError!void {
+ fn write(iface_stream: *Stream, bytes: []const u8) StreamError!void {
const self = @fieldParentPtr(MyStream, "stream", iface_stream);
if (!self.anything_changed_ptr.*) {
@@ -63,9 +63,9 @@ pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(
}
fn renderRoot(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
) (@typeOf(stream).Child.Error || Error)!void {
// render all the line comments at the beginning of the file
var tok_it = tree.tokens.iterator(0);
@@ -90,7 +90,7 @@ fn renderRoot(
}
}
-fn renderExtraNewline(tree: &ast.Tree, stream: var, start_col: &usize, node: &ast.Node) !void {
+fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) !void {
const first_token = node.firstToken();
var prev_token = first_token;
while (tree.tokens.at(prev_token - 1).id == Token.Id.DocComment) {
@@ -104,7 +104,7 @@ fn renderExtraNewline(tree: &ast.Tree, stream: var, start_col: &usize, node: &as
}
}
-fn renderTopLevelDecl(allocator: &mem.Allocator, stream: var, tree: &ast.Tree, indent: usize, start_col: &usize, decl: &ast.Node) (@typeOf(stream).Child.Error || Error)!void {
+fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@typeOf(stream).Child.Error || Error)!void {
switch (decl.id) {
ast.Node.Id.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
@@ -214,12 +214,12 @@ fn renderTopLevelDecl(allocator: &mem.Allocator, stream: var, tree: &ast.Tree, i
}
fn renderExpression(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- base: &ast.Node,
+ start_col: *usize,
+ base: *ast.Node,
space: Space,
) (@typeOf(stream).Child.Error || Error)!void {
switch (base.id) {
@@ -1640,12 +1640,12 @@ fn renderExpression(
}
fn renderVarDecl(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- var_decl: &ast.Node.VarDecl,
+ start_col: *usize,
+ var_decl: *ast.Node.VarDecl,
) (@typeOf(stream).Child.Error || Error)!void {
if (var_decl.visib_token) |visib_token| {
try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
@@ -1696,12 +1696,12 @@ fn renderVarDecl(
}
fn renderParamDecl(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- base: &ast.Node,
+ start_col: *usize,
+ base: *ast.Node,
space: Space,
) (@typeOf(stream).Child.Error || Error)!void {
const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base);
@@ -1724,12 +1724,12 @@ fn renderParamDecl(
}
fn renderStatement(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- base: &ast.Node,
+ start_col: *usize,
+ base: *ast.Node,
) (@typeOf(stream).Child.Error || Error)!void {
switch (base.id) {
ast.Node.Id.VarDecl => {
@@ -1761,7 +1761,7 @@ const Space = enum {
BlockStart,
};
-fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent: usize, start_col: &usize, space: Space) (@typeOf(stream).Child.Error || Error)!void {
+fn renderToken(tree: *ast.Tree, stream: var, token_index: ast.TokenIndex, indent: usize, start_col: *usize, space: Space) (@typeOf(stream).Child.Error || Error)!void {
if (space == Space.BlockStart) {
if (start_col.* < indent + indent_delta)
return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
@@ -1928,11 +1928,11 @@ fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent
}
fn renderDocComments(
- tree: &ast.Tree,
+ tree: *ast.Tree,
stream: var,
node: var,
indent: usize,
- start_col: &usize,
+ start_col: *usize,
) (@typeOf(stream).Child.Error || Error)!void {
const comment = node.doc_comments ?? return;
var it = comment.lines.iterator(0);
@@ -1949,7 +1949,7 @@ fn renderDocComments(
}
}
-fn nodeIsBlock(base: &const ast.Node) bool {
+fn nodeIsBlock(base: *const ast.Node) bool {
return switch (base.id) {
ast.Node.Id.Block,
ast.Node.Id.If,
@@ -1961,7 +1961,7 @@ fn nodeIsBlock(base: &const ast.Node) bool {
};
}
-fn nodeCausesSliceOpSpace(base: &ast.Node) bool {
+fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
const infix_op = base.cast(ast.Node.InfixOp) ?? return false;
return switch (infix_op.op) {
ast.Node.InfixOp.Op.Period => false,
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 7c3b3210fb..8378a9011d 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -200,7 +200,7 @@ pub const Tokenizer = struct {
pending_invalid_token: ?Token,
/// For debugging purposes
- pub fn dump(self: &Tokenizer, token: &const Token) void {
+ pub fn dump(self: *Tokenizer, token: *const Token) void {
std.debug.warn("{} \"{}\"\n", @tagName(token.id), self.buffer[token.start..token.end]);
}
@@ -265,7 +265,7 @@ pub const Tokenizer = struct {
SawAtSign,
};
- pub fn next(self: &Tokenizer) Token {
+ pub fn next(self: *Tokenizer) Token {
if (self.pending_invalid_token) |token| {
self.pending_invalid_token = null;
return token;
@@ -1089,7 +1089,7 @@ pub const Tokenizer = struct {
return result;
}
- fn checkLiteralCharacter(self: &Tokenizer) void {
+ fn checkLiteralCharacter(self: *Tokenizer) void {
if (self.pending_invalid_token != null) return;
const invalid_length = self.getInvalidCharacterLength();
if (invalid_length == 0) return;
@@ -1100,7 +1100,7 @@ pub const Tokenizer = struct {
};
}
- fn getInvalidCharacterLength(self: &Tokenizer) u3 {
+ fn getInvalidCharacterLength(self: *Tokenizer) u3 {
const c0 = self.buffer[self.index];
if (c0 < 0x80) {
if (c0 < 0x20 or c0 == 0x7f) {
diff --git a/test/assemble_and_link.zig b/test/assemble_and_link.zig
index 2593f3306a..8c727e87b5 100644
--- a/test/assemble_and_link.zig
+++ b/test/assemble_and_link.zig
@@ -1,7 +1,7 @@
const builtin = @import("builtin");
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
if (builtin.os == builtin.Os.linux and builtin.arch == builtin.Arch.x86_64) {
cases.addAsm("hello world linux x86_64",
\\.text
diff --git a/test/build_examples.zig b/test/build_examples.zig
index 7a4c0f35d9..1ba0ca46cf 100644
--- a/test/build_examples.zig
+++ b/test/build_examples.zig
@@ -2,7 +2,7 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
const is_windows = builtin.os == builtin.Os.windows;
-pub fn addCases(cases: &tests.BuildExamplesContext) void {
+pub fn addCases(cases: *tests.BuildExamplesContext) void {
cases.add("example/hello_world/hello.zig");
cases.addC("example/hello_world/hello_libc.zig");
cases.add("example/cat/main.zig");
diff --git a/test/cases/align.zig b/test/cases/align.zig
index 582063766f..99bdcdf940 100644
--- a/test/cases/align.zig
+++ b/test/cases/align.zig
@@ -5,7 +5,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
- assert(@typeOf(&foo) == &align(4) u8);
+ assert(@typeOf(&foo) == *align(4) u8);
const slice = (&foo)[0..1];
assert(@typeOf(slice) == []align(4) u8);
}
@@ -30,7 +30,7 @@ var baz: packed struct {
} = undefined;
test "packed struct alignment" {
- assert(@typeOf(&baz.b) == &align(1) u32);
+ assert(@typeOf(&baz.b) == *align(1) u32);
}
const blah: packed struct {
@@ -40,11 +40,11 @@ const blah: packed struct {
} = undefined;
test "bit field alignment" {
- assert(@typeOf(&blah.b) == &align(1:3:6) const u3);
+ assert(@typeOf(&blah.b) == *align(1:3:6) const u3);
}
test "default alignment allows unspecified in type syntax" {
- assert(&u32 == &align(@alignOf(u32)) u32);
+ assert(*u32 == *align(@alignOf(u32)) u32);
}
test "implicitly decreasing pointer alignment" {
@@ -53,7 +53,7 @@ test "implicitly decreasing pointer alignment" {
assert(addUnaligned(&a, &b) == 7);
}
-fn addUnaligned(a: &align(1) const u32, b: &align(1) const u32) u32 {
+fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
return a.* + b.*;
}
@@ -76,7 +76,7 @@ fn testBytesAlign(b: u8) void {
b,
b,
};
- const ptr = @ptrCast(&u32, &bytes[0]);
+ const ptr = @ptrCast(*u32, &bytes[0]);
assert(ptr.* == 0x33333333);
}
@@ -99,10 +99,10 @@ test "@alignCast pointers" {
expectsOnly1(&x);
assert(x == 2);
}
-fn expectsOnly1(x: &align(1) u32) void {
+fn expectsOnly1(x: *align(1) u32) void {
expects4(@alignCast(4, x));
}
-fn expects4(x: &align(4) u32) void {
+fn expects4(x: *align(4) u32) void {
x.* += 1;
}
@@ -163,8 +163,8 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
test "@ptrCast preserves alignment of bigger source" {
var x: u32 align(16) = 1234;
- const ptr = @ptrCast(&u8, &x);
- assert(@typeOf(ptr) == &align(16) u8);
+ const ptr = @ptrCast(*u8, &x);
+ assert(@typeOf(ptr) == *align(16) u8);
}
test "compile-time known array index has best alignment possible" {
@@ -175,10 +175,10 @@ test "compile-time known array index has best alignment possible" {
3,
4,
};
- assert(@typeOf(&array[0]) == &align(4) u8);
- assert(@typeOf(&array[1]) == &u8);
- assert(@typeOf(&array[2]) == &align(2) u8);
- assert(@typeOf(&array[3]) == &u8);
+ assert(@typeOf(&array[0]) == *align(4) u8);
+ assert(@typeOf(&array[1]) == *u8);
+ assert(@typeOf(&array[2]) == *align(2) u8);
+ assert(@typeOf(&array[3]) == *u8);
// because align is too small but we still figure out to use 2
var bigger align(2) = []u64{
@@ -187,10 +187,10 @@ test "compile-time known array index has best alignment possible" {
3,
4,
};
- assert(@typeOf(&bigger[0]) == &align(2) u64);
- assert(@typeOf(&bigger[1]) == &align(2) u64);
- assert(@typeOf(&bigger[2]) == &align(2) u64);
- assert(@typeOf(&bigger[3]) == &align(2) u64);
+ assert(@typeOf(&bigger[0]) == *align(2) u64);
+ assert(@typeOf(&bigger[1]) == *align(2) u64);
+ assert(@typeOf(&bigger[2]) == *align(2) u64);
+ assert(@typeOf(&bigger[3]) == *align(2) u64);
// because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
var smaller align(2) = []u32{
@@ -199,21 +199,21 @@ test "compile-time known array index has best alignment possible" {
3,
4,
};
- testIndex(&smaller[0], 0, &align(2) u32);
- testIndex(&smaller[0], 1, &align(2) u32);
- testIndex(&smaller[0], 2, &align(2) u32);
- testIndex(&smaller[0], 3, &align(2) u32);
+ testIndex(&smaller[0], 0, *align(2) u32);
+ testIndex(&smaller[0], 1, *align(2) u32);
+ testIndex(&smaller[0], 2, *align(2) u32);
+ testIndex(&smaller[0], 3, *align(2) u32);
// has to use ABI alignment because index known at runtime only
- testIndex2(&array[0], 0, &u8);
- testIndex2(&array[0], 1, &u8);
- testIndex2(&array[0], 2, &u8);
- testIndex2(&array[0], 3, &u8);
+ testIndex2(&array[0], 0, *u8);
+ testIndex2(&array[0], 1, *u8);
+ testIndex2(&array[0], 2, *u8);
+ testIndex2(&array[0], 3, *u8);
}
-fn testIndex(smaller: &align(2) u32, index: usize, comptime T: type) void {
+fn testIndex(smaller: *align(2) u32, index: usize, comptime T: type) void {
assert(@typeOf(&smaller[index]) == T);
}
-fn testIndex2(ptr: &align(4) u8, index: usize, comptime T: type) void {
+fn testIndex2(ptr: *align(4) u8, index: usize, comptime T: type) void {
assert(@typeOf(&ptr[index]) == T);
}
diff --git a/test/cases/atomics.zig b/test/cases/atomics.zig
index d406285d29..67c9ab3dd1 100644
--- a/test/cases/atomics.zig
+++ b/test/cases/atomics.zig
@@ -34,7 +34,7 @@ test "atomicrmw and atomicload" {
testAtomicLoad(&data);
}
-fn testAtomicRmw(ptr: &u8) void {
+fn testAtomicRmw(ptr: *u8) void {
const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst);
assert(prev_value == 200);
comptime {
@@ -45,7 +45,7 @@ fn testAtomicRmw(ptr: &u8) void {
}
}
-fn testAtomicLoad(ptr: &u8) void {
+fn testAtomicLoad(ptr: *u8) void {
const x = @atomicLoad(u8, ptr, AtomicOrder.SeqCst);
assert(x == 42);
}
@@ -54,18 +54,18 @@ test "cmpxchg with ptr" {
var data1: i32 = 1234;
var data2: i32 = 5678;
var data3: i32 = 9101;
- var x: &i32 = &data1;
- if (@cmpxchgWeak(&i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ var x: *i32 = &data1;
+ if (@cmpxchgWeak(*i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
assert(x1 == &data1);
} else {
@panic("cmpxchg should have failed");
}
- while (@cmpxchgWeak(&i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ while (@cmpxchgWeak(*i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
assert(x1 == &data1);
}
assert(x == &data3);
- assert(@cmpxchgStrong(&i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
+ assert(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
assert(x == &data2);
}
diff --git a/test/cases/bugs/655.zig b/test/cases/bugs/655.zig
index 4431767d5c..50374d4e6d 100644
--- a/test/cases/bugs/655.zig
+++ b/test/cases/bugs/655.zig
@@ -3,10 +3,10 @@ const other_file = @import("655_other_file.zig");
test "function with &const parameter with type dereferenced by namespace" {
const x: other_file.Integer = 1234;
- comptime std.debug.assert(@typeOf(&x) == &const other_file.Integer);
+ comptime std.debug.assert(@typeOf(&x) == *const other_file.Integer);
foo(x);
}
-fn foo(x: &const other_file.Integer) void {
+fn foo(x: *const other_file.Integer) void {
std.debug.assert(x.* == 1234);
}
diff --git a/test/cases/bugs/828.zig b/test/cases/bugs/828.zig
index 10d7370b90..50ae0fd279 100644
--- a/test/cases/bugs/828.zig
+++ b/test/cases/bugs/828.zig
@@ -3,7 +3,7 @@ const CountBy = struct {
const One = CountBy{ .a = 1 };
- pub fn counter(self: &const CountBy) Counter {
+ pub fn counter(self: *const CountBy) Counter {
return Counter{ .i = 0 };
}
};
@@ -11,13 +11,13 @@ const CountBy = struct {
const Counter = struct {
i: usize,
- pub fn count(self: &Counter) bool {
+ pub fn count(self: *Counter) bool {
self.i += 1;
return self.i <= 10;
}
};
-fn constCount(comptime cb: &const CountBy, comptime unused: u32) void {
+fn constCount(comptime cb: *const CountBy, comptime unused: u32) void {
comptime {
var cnt = cb.counter();
if (cnt.i != 0) @compileError("Counter instance reused!");
diff --git a/test/cases/bugs/920.zig b/test/cases/bugs/920.zig
index c315206072..2903f05a29 100644
--- a/test/cases/bugs/920.zig
+++ b/test/cases/bugs/920.zig
@@ -9,10 +9,10 @@ const ZigTable = struct {
pdf: fn (f64) f64,
is_symmetric: bool,
- zero_case: fn (&Random, f64) f64,
+ zero_case: fn (*Random, f64) f64,
};
-fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (&Random, f64) f64) ZigTable {
+fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (*Random, f64) f64) ZigTable {
var tables: ZigTable = undefined;
tables.is_symmetric = is_symmetric;
@@ -45,7 +45,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
-fn norm_zero_case(random: &Random, u: f64) f64 {
+fn norm_zero_case(random: *Random, u: f64) f64 {
return 0.0;
}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index e37451ea93..7358a4ffd8 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -3,20 +3,20 @@ const mem = @import("std").mem;
test "int to ptr cast" {
const x = usize(13);
- const y = @intToPtr(&u8, x);
+ const y = @intToPtr(*u8, x);
const z = @ptrToInt(y);
assert(z == 13);
}
test "integer literal to pointer cast" {
- const vga_mem = @intToPtr(&u16, 0xB8000);
+ const vga_mem = @intToPtr(*u16, 0xB8000);
assert(@ptrToInt(vga_mem) == 0xB8000);
}
test "pointer reinterpret const float to int" {
const float: f64 = 5.99999999999994648725e-01;
const float_ptr = &float;
- const int_ptr = @ptrCast(&const i32, float_ptr);
+ const int_ptr = @ptrCast(*const i32, float_ptr);
const int_val = int_ptr.*;
assert(int_val == 858993411);
}
@@ -28,7 +28,7 @@ test "implicitly cast a pointer to a const pointer of it" {
assert(x == 2);
}
-fn funcWithConstPtrPtr(x: &const &i32) void {
+fn funcWithConstPtrPtr(x: *const *i32) void {
x.*.* += 1;
}
@@ -66,11 +66,11 @@ fn Struct(comptime T: type) type {
const Self = this;
x: T,
- fn pointer(self: &const Self) Self {
+ fn pointer(self: *const Self) Self {
return self.*;
}
- fn maybePointer(self: ?&const Self) Self {
+ fn maybePointer(self: ?*const Self) Self {
const none = Self{ .x = if (T == void) void{} else 0 };
return (self ?? &none).*;
}
@@ -80,11 +80,11 @@ fn Struct(comptime T: type) type {
const Union = union {
x: u8,
- fn pointer(self: &const Union) Union {
+ fn pointer(self: *const Union) Union {
return self.*;
}
- fn maybePointer(self: ?&const Union) Union {
+ fn maybePointer(self: ?*const Union) Union {
const none = Union{ .x = 0 };
return (self ?? &none).*;
}
@@ -94,11 +94,11 @@ const Enum = enum {
None,
Some,
- fn pointer(self: &const Enum) Enum {
+ fn pointer(self: *const Enum) Enum {
return self.*;
}
- fn maybePointer(self: ?&const Enum) Enum {
+ fn maybePointer(self: ?*const Enum) Enum {
return (self ?? &Enum.None).*;
}
};
@@ -107,16 +107,16 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" {
const S = struct {
const Self = this;
x: u8,
- fn constConst(p: &const &const Self) u8 {
+ fn constConst(p: *const *const Self) u8 {
return (p.*).x;
}
- fn maybeConstConst(p: ?&const &const Self) u8 {
+ fn maybeConstConst(p: ?*const *const Self) u8 {
return ((??p).*).x;
}
- fn constConstConst(p: &const &const &const Self) u8 {
+ fn constConstConst(p: *const *const *const Self) u8 {
return (p.*.*).x;
}
- fn maybeConstConstConst(p: ?&const &const &const Self) u8 {
+ fn maybeConstConstConst(p: ?*const *const *const Self) u8 {
return ((??p).*.*).x;
}
};
@@ -166,12 +166,12 @@ fn testPeerResolveArrayConstSlice(b: bool) void {
}
test "integer literal to &const int" {
- const x: &const i32 = 3;
+ const x: *const i32 = 3;
assert(x.* == 3);
}
test "string literal to &const []const u8" {
- const x: &const []const u8 = "hello";
+ const x: *const []const u8 = "hello";
assert(mem.eql(u8, x.*, "hello"));
}
@@ -209,11 +209,11 @@ test "return null from fn() error!?&T" {
const b = returnNullLitFromMaybeTypeErrorRef();
assert((try a) == null and (try b) == null);
}
-fn returnNullFromMaybeTypeErrorRef() error!?&A {
- const a: ?&A = null;
+fn returnNullFromMaybeTypeErrorRef() error!?*A {
+ const a: ?*A = null;
return a;
}
-fn returnNullLitFromMaybeTypeErrorRef() error!?&A {
+fn returnNullLitFromMaybeTypeErrorRef() error!?*A {
return null;
}
@@ -312,7 +312,7 @@ test "implicit cast from &const [N]T to []const T" {
fn testCastConstArrayRefToConstSlice() void {
const blah = "aoeu";
const const_array_ref = &blah;
- assert(@typeOf(const_array_ref) == &const [4]u8);
+ assert(@typeOf(const_array_ref) == *const [4]u8);
const slice: []const u8 = const_array_ref;
assert(mem.eql(u8, slice, "aoeu"));
}
@@ -322,7 +322,7 @@ test "var args implicitly casts by value arg to const ref" {
}
fn foo(args: ...) void {
- assert(@typeOf(args[0]) == &const [5]u8);
+ assert(@typeOf(args[0]) == *const [5]u8);
}
test "peer type resolution: error and [N]T" {
diff --git a/test/cases/const_slice_child.zig b/test/cases/const_slice_child.zig
index a92c589186..e012c729a0 100644
--- a/test/cases/const_slice_child.zig
+++ b/test/cases/const_slice_child.zig
@@ -1,10 +1,10 @@
const debug = @import("std").debug;
const assert = debug.assert;
-var argv: &const &const u8 = undefined;
+var argv: *const *const u8 = undefined;
test "const slice child" {
- const strs = ([]&const u8){
+ const strs = ([]*const u8){
c"one",
c"two",
c"three",
@@ -29,7 +29,7 @@ fn bar(argc: usize) void {
foo(args);
}
-fn strlen(ptr: &const u8) usize {
+fn strlen(ptr: *const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig
index 8a071c6aad..4d2aa54a69 100644
--- a/test/cases/coroutines.zig
+++ b/test/cases/coroutines.zig
@@ -154,7 +154,7 @@ test "async function with dot syntax" {
test "async fn pointer in a struct field" {
var data: i32 = 1;
const Foo = struct {
- bar: async<&std.mem.Allocator> fn (&i32) void,
+ bar: async<*std.mem.Allocator> fn (*i32) void,
};
var foo = Foo{ .bar = simpleAsyncFn2 };
const p = (async foo.bar(&data)) catch unreachable;
@@ -162,7 +162,7 @@ test "async fn pointer in a struct field" {
cancel p;
assert(data == 4);
}
-async<&std.mem.Allocator> fn simpleAsyncFn2(y: &i32) void {
+async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
defer y.* += 2;
y.* += 1;
suspend;
@@ -220,7 +220,7 @@ test "break from suspend" {
cancel p;
std.debug.assert(my_result == 2);
}
-async fn testBreakFromSuspend(my_result: &i32) void {
+async fn testBreakFromSuspend(my_result: *i32) void {
s: suspend |p| {
break :s;
}
diff --git a/test/cases/enum.zig b/test/cases/enum.zig
index cbcbc5e306..ae9f04869b 100644
--- a/test/cases/enum.zig
+++ b/test/cases/enum.zig
@@ -56,14 +56,14 @@ test "constant enum with payload" {
shouldBeNotEmpty(full);
}
-fn shouldBeEmpty(x: &const AnEnumWithPayload) void {
+fn shouldBeEmpty(x: *const AnEnumWithPayload) void {
switch (x.*) {
AnEnumWithPayload.Empty => {},
else => unreachable,
}
}
-fn shouldBeNotEmpty(x: &const AnEnumWithPayload) void {
+fn shouldBeNotEmpty(x: *const AnEnumWithPayload) void {
switch (x.*) {
AnEnumWithPayload.Empty => unreachable,
else => {},
@@ -750,15 +750,15 @@ test "bit field access with enum fields" {
assert(data.b == B.Four3);
}
-fn getA(data: &const BitFieldOfEnums) A {
+fn getA(data: *const BitFieldOfEnums) A {
return data.a;
}
-fn getB(data: &const BitFieldOfEnums) B {
+fn getB(data: *const BitFieldOfEnums) B {
return data.b;
}
-fn getC(data: &const BitFieldOfEnums) C {
+fn getC(data: *const BitFieldOfEnums) C {
return data.c;
}
diff --git a/test/cases/enum_with_members.zig b/test/cases/enum_with_members.zig
index 8fafa70b02..18174186a9 100644
--- a/test/cases/enum_with_members.zig
+++ b/test/cases/enum_with_members.zig
@@ -6,7 +6,7 @@ const ET = union(enum) {
SINT: i32,
UINT: u32,
- pub fn print(a: &const ET, buf: []u8) error!usize {
+ pub fn print(a: *const ET, buf: []u8) error!usize {
return switch (a.*) {
ET.SINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0),
ET.UINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0),
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index 8a6dc25bd8..6c8bcfdbab 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -282,7 +282,7 @@ fn fnWithFloatMode() f32 {
const SimpleStruct = struct {
field: i32,
- fn method(self: &const SimpleStruct) i32 {
+ fn method(self: *const SimpleStruct) i32 {
return self.field + 3;
}
};
@@ -367,7 +367,7 @@ test "const global shares pointer with other same one" {
assertEqualPtrs(&hi1[0], &hi2[0]);
comptime assert(&hi1[0] == &hi2[0]);
}
-fn assertEqualPtrs(ptr1: &const u8, ptr2: &const u8) void {
+fn assertEqualPtrs(ptr1: *const u8, ptr2: *const u8) void {
assert(ptr1 == ptr2);
}
@@ -418,9 +418,9 @@ test "string literal used as comptime slice is memoized" {
}
test "comptime slice of undefined pointer of length 0" {
- const slice1 = (&i32)(undefined)[0..0];
+ const slice1 = (*i32)(undefined)[0..0];
assert(slice1.len == 0);
- const slice2 = (&i32)(undefined)[100..100];
+ const slice2 = (*i32)(undefined)[100..100];
assert(slice2.len == 0);
}
@@ -472,7 +472,7 @@ test "comptime function with mutable pointer is not memoized" {
}
}
-fn increment(value: &i32) void {
+fn increment(value: *i32) void {
value.* += 1;
}
@@ -517,7 +517,7 @@ test "comptime slice of pointer preserves comptime var" {
const SingleFieldStruct = struct {
x: i32,
- fn read_x(self: &const SingleFieldStruct) i32 {
+ fn read_x(self: *const SingleFieldStruct) i32 {
return self.x;
}
};
diff --git a/test/cases/field_parent_ptr.zig b/test/cases/field_parent_ptr.zig
index 1a7de9ce35..00d4e0f367 100644
--- a/test/cases/field_parent_ptr.zig
+++ b/test/cases/field_parent_ptr.zig
@@ -24,7 +24,7 @@ const foo = Foo{
.d = -10,
};
-fn testParentFieldPtr(c: &const i32) void {
+fn testParentFieldPtr(c: *const i32) void {
assert(c == &foo.c);
const base = @fieldParentPtr(Foo, "c", c);
@@ -32,7 +32,7 @@ fn testParentFieldPtr(c: &const i32) void {
assert(&base.c == c);
}
-fn testParentFieldPtrFirst(a: &const bool) void {
+fn testParentFieldPtrFirst(a: *const bool) void {
assert(a == &foo.a);
const base = @fieldParentPtr(Foo, "a", a);
diff --git a/test/cases/fn_in_struct_in_comptime.zig b/test/cases/fn_in_struct_in_comptime.zig
index c22da71940..fabb57e9cb 100644
--- a/test/cases/fn_in_struct_in_comptime.zig
+++ b/test/cases/fn_in_struct_in_comptime.zig
@@ -1,9 +1,9 @@
const assert = @import("std").debug.assert;
-fn get_foo() fn (&u8) usize {
+fn get_foo() fn (*u8) usize {
comptime {
return struct {
- fn func(ptr: &u8) usize {
+ fn func(ptr: *u8) usize {
var u = @ptrToInt(ptr);
return u;
}
@@ -13,5 +13,5 @@ fn get_foo() fn (&u8) usize {
test "define a function in an anonymous struct in comptime" {
const foo = get_foo();
- assert(foo(@intToPtr(&u8, 12345)) == 12345);
+ assert(foo(@intToPtr(*u8, 12345)) == 12345);
}
diff --git a/test/cases/generics.zig b/test/cases/generics.zig
index 37cd1b89e4..a76990e2a1 100644
--- a/test/cases/generics.zig
+++ b/test/cases/generics.zig
@@ -96,8 +96,8 @@ test "generic struct" {
fn GenNode(comptime T: type) type {
return struct {
value: T,
- next: ?&GenNode(T),
- fn getVal(n: &const GenNode(T)) T {
+ next: ?*GenNode(T),
+ fn getVal(n: *const GenNode(T)) T {
return n.value;
}
};
@@ -126,11 +126,11 @@ test "generic fn with implicit cast" {
13,
}) == 0);
}
-fn getByte(ptr: ?&const u8) u8 {
+fn getByte(ptr: ?*const u8) u8 {
return (??ptr).*;
}
fn getFirstByte(comptime T: type, mem: []const T) u8 {
- return getByte(@ptrCast(&const u8, &mem[0]));
+ return getByte(@ptrCast(*const u8, &mem[0]));
}
const foos = []fn (var) bool{
diff --git a/test/cases/incomplete_struct_param_tld.zig b/test/cases/incomplete_struct_param_tld.zig
index a2f57743d0..552d6ef185 100644
--- a/test/cases/incomplete_struct_param_tld.zig
+++ b/test/cases/incomplete_struct_param_tld.zig
@@ -11,12 +11,12 @@ const B = struct {
const C = struct {
x: i32,
- fn d(c: &const C) i32 {
+ fn d(c: *const C) i32 {
return c.x;
}
};
-fn foo(a: &const A) i32 {
+fn foo(a: *const A) i32 {
return a.b.c.d();
}
diff --git a/test/cases/math.zig b/test/cases/math.zig
index 0b4622702f..5f16e903b2 100644
--- a/test/cases/math.zig
+++ b/test/cases/math.zig
@@ -28,13 +28,13 @@ fn testDivision() void {
assert(divTrunc(f32, -5.0, 3.0) == -1.0);
comptime {
- assert(1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600);
- assert(@rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600);
- assert(1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2);
- assert(@divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2);
- assert(@divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2);
- assert(@divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2);
- assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1);
+ assert(1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600);
+ assert(@rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600);
+ assert(1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2);
+ assert(@divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2);
+ assert(@divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2);
+ assert(@divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2);
+ assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1);
}
}
fn div(comptime T: type, a: T, b: T) T {
@@ -324,8 +324,8 @@ test "big number addition" {
test "big number multiplication" {
comptime {
- assert(45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567);
- assert(594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016);
+ assert(45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567,);
+ assert(594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016,);
}
}
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index b6b2da8de5..919b978f9f 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -252,20 +252,20 @@ test "multiline C string" {
}
test "type equality" {
- assert(&const u8 != &u8);
+ assert(*const u8 != *u8);
}
const global_a: i32 = 1234;
-const global_b: &const i32 = &global_a;
-const global_c: &const f32 = @ptrCast(&const f32, global_b);
+const global_b: *const i32 = &global_a;
+const global_c: *const f32 = @ptrCast(*const f32, global_b);
test "compile time global reinterpret" {
- const d = @ptrCast(&const i32, global_c);
+ const d = @ptrCast(*const i32, global_c);
assert(d.* == 1234);
}
test "explicit cast maybe pointers" {
- const a: ?&i32 = undefined;
- const b: ?&f32 = @ptrCast(?&f32, a);
+ const a: ?*i32 = undefined;
+ const b: ?*f32 = @ptrCast(?*f32, a);
}
test "generic malloc free" {
@@ -274,7 +274,7 @@ test "generic malloc free" {
}
var some_mem: [100]u8 = undefined;
fn memAlloc(comptime T: type, n: usize) error![]T {
- return @ptrCast(&T, &some_mem[0])[0..n];
+ return @ptrCast(*T, &some_mem[0])[0..n];
}
fn memFree(comptime T: type, memory: []T) void {}
@@ -357,7 +357,7 @@ const test3_foo = Test3Foo{
},
};
const test3_bar = Test3Foo{ .Two = 13 };
-fn test3_1(f: &const Test3Foo) void {
+fn test3_1(f: *const Test3Foo) void {
switch (f.*) {
Test3Foo.Three => |pt| {
assert(pt.x == 3);
@@ -366,7 +366,7 @@ fn test3_1(f: &const Test3Foo) void {
else => unreachable,
}
}
-fn test3_2(f: &const Test3Foo) void {
+fn test3_2(f: *const Test3Foo) void {
switch (f.*) {
Test3Foo.Two => |x| {
assert(x == 13);
@@ -393,7 +393,7 @@ test "pointer comparison" {
const b = &a;
assert(ptrEql(b, b));
}
-fn ptrEql(a: &const []const u8, b: &const []const u8) bool {
+fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
return a == b;
}
@@ -446,13 +446,13 @@ fn testPointerToVoidReturnType() error!void {
return a.*;
}
const test_pointer_to_void_return_type_x = void{};
-fn testPointerToVoidReturnType2() &const void {
+fn testPointerToVoidReturnType2() *const void {
return &test_pointer_to_void_return_type_x;
}
test "non const ptr to aliased type" {
const int = i32;
- assert(?&int == ?&i32);
+ assert(?*int == ?*i32);
}
test "array 2D const double ptr" {
@@ -463,7 +463,7 @@ test "array 2D const double ptr" {
testArray2DConstDoublePtr(&rect_2d_vertexes[0][0]);
}
-fn testArray2DConstDoublePtr(ptr: &const f32) void {
+fn testArray2DConstDoublePtr(ptr: *const f32) void {
assert(ptr[0] == 1.0);
assert(ptr[1] == 2.0);
}
@@ -497,7 +497,7 @@ test "@typeId" {
assert(@typeId(u64) == Tid.Int);
assert(@typeId(f32) == Tid.Float);
assert(@typeId(f64) == Tid.Float);
- assert(@typeId(&f32) == Tid.Pointer);
+ assert(@typeId(*f32) == Tid.Pointer);
assert(@typeId([2]u8) == Tid.Array);
assert(@typeId(AStruct) == Tid.Struct);
assert(@typeId(@typeOf(1)) == Tid.IntLiteral);
@@ -540,7 +540,7 @@ test "@typeName" {
};
comptime {
assert(mem.eql(u8, @typeName(i64), "i64"));
- assert(mem.eql(u8, @typeName(&usize), "&usize"));
+ assert(mem.eql(u8, @typeName(*usize), "*usize"));
// https://github.com/ziglang/zig/issues/675
assert(mem.eql(u8, @typeName(TypeFromFn(u8)), "TypeFromFn(u8)"));
assert(mem.eql(u8, @typeName(Struct), "Struct"));
@@ -555,7 +555,7 @@ fn TypeFromFn(comptime T: type) type {
test "volatile load and store" {
var number: i32 = 1234;
- const ptr = (&volatile i32)(&number);
+ const ptr = (*volatile i32)(&number);
ptr.* += 1;
assert(ptr.* == 1235);
}
@@ -587,28 +587,28 @@ var global_ptr = &gdt[0];
// can't really run this test but we can make sure it has no compile error
// and generates code
-const vram = @intToPtr(&volatile u8, 0x20000000)[0..0x8000];
+const vram = @intToPtr(*volatile u8, 0x20000000)[0..0x8000];
export fn writeToVRam() void {
vram[0] = 'X';
}
test "pointer child field" {
- assert((&u32).Child == u32);
+ assert((*u32).Child == u32);
}
const OpaqueA = @OpaqueType();
const OpaqueB = @OpaqueType();
test "@OpaqueType" {
- assert(&OpaqueA != &OpaqueB);
+ assert(*OpaqueA != *OpaqueB);
assert(mem.eql(u8, @typeName(OpaqueA), "OpaqueA"));
assert(mem.eql(u8, @typeName(OpaqueB), "OpaqueB"));
}
test "variable is allowed to be a pointer to an opaque type" {
var x: i32 = 1234;
- _ = hereIsAnOpaqueType(@ptrCast(&OpaqueA, &x));
+ _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, &x));
}
-fn hereIsAnOpaqueType(ptr: &OpaqueA) &OpaqueA {
+fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
var a = ptr;
return a;
}
@@ -692,7 +692,7 @@ test "packed struct, enum, union parameters in extern function" {
}, PackedUnion{ .a = 1 }, PackedEnum.A);
}
-export fn testPackedStuff(a: &const PackedStruct, b: &const PackedUnion, c: PackedEnum) void {}
+export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion, c: PackedEnum) void {}
test "slicing zero length array" {
const s1 = ""[0..];
@@ -703,8 +703,8 @@ test "slicing zero length array" {
assert(mem.eql(u32, s2, []u32{}));
}
-const addr1 = @ptrCast(&const u8, emptyFn);
+const addr1 = @ptrCast(*const u8, emptyFn);
test "comptime cast fn to ptr" {
- const addr2 = @ptrCast(&const u8, emptyFn);
+ const addr2 = @ptrCast(*const u8, emptyFn);
comptime assert(addr1 == addr2);
}
diff --git a/test/cases/null.zig b/test/cases/null.zig
index 936e5fafbd..bd78990ff4 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -65,7 +65,7 @@ test "if var maybe pointer" {
.d = 1,
}) == 15);
}
-fn shouldBeAPlus1(p: &const Particle) u64 {
+fn shouldBeAPlus1(p: *const Particle) u64 {
var maybe_particle: ?Particle = p.*;
if (maybe_particle) |*particle| {
particle.a += 1;
diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig
index b82ce6340f..48fcc9ef03 100644
--- a/test/cases/reflection.zig
+++ b/test/cases/reflection.zig
@@ -5,7 +5,7 @@ const reflection = this;
test "reflection: array, pointer, nullable, error union type child" {
comptime {
assert(([10]u8).Child == u8);
- assert((&u8).Child == u8);
+ assert((*u8).Child == u8);
assert((error!u8).Payload == u8);
assert((?u8).Child == u8);
}
diff --git a/test/cases/slice.zig b/test/cases/slice.zig
index eae6fa895e..24e5239e2d 100644
--- a/test/cases/slice.zig
+++ b/test/cases/slice.zig
@@ -1,7 +1,7 @@
const assert = @import("std").debug.assert;
const mem = @import("std").mem;
-const x = @intToPtr(&i32, 0x1000)[0..0x500];
+const x = @intToPtr(*i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
assert(@ptrToInt(x.ptr) == 0x1000);
diff --git a/test/cases/struct.zig b/test/cases/struct.zig
index d4a1c7fbe3..0712e508de 100644
--- a/test/cases/struct.zig
+++ b/test/cases/struct.zig
@@ -43,7 +43,7 @@ const VoidStructFieldsFoo = struct {
test "structs" {
var foo: StructFoo = undefined;
- @memset(@ptrCast(&u8, &foo), 0, @sizeOf(StructFoo));
+ @memset(@ptrCast(*u8, &foo), 0, @sizeOf(StructFoo));
foo.a += 1;
foo.b = foo.a == 1;
testFoo(foo);
@@ -55,16 +55,16 @@ const StructFoo = struct {
b: bool,
c: f32,
};
-fn testFoo(foo: &const StructFoo) void {
+fn testFoo(foo: *const StructFoo) void {
assert(foo.b);
}
-fn testMutation(foo: &StructFoo) void {
+fn testMutation(foo: *StructFoo) void {
foo.c = 100;
}
const Node = struct {
val: Val,
- next: &Node,
+ next: *Node,
};
const Val = struct {
@@ -112,7 +112,7 @@ fn aFunc() i32 {
return 13;
}
-fn callStructField(foo: &const Foo) i32 {
+fn callStructField(foo: *const Foo) i32 {
return foo.ptr();
}
@@ -124,7 +124,7 @@ test "store member function in variable" {
}
const MemberFnTestFoo = struct {
x: i32,
- fn member(foo: &const MemberFnTestFoo) i32 {
+ fn member(foo: *const MemberFnTestFoo) i32 {
return foo.x;
}
};
@@ -141,7 +141,7 @@ test "member functions" {
}
const MemberFnRand = struct {
seed: u32,
- pub fn getSeed(r: &const MemberFnRand) u32 {
+ pub fn getSeed(r: *const MemberFnRand) u32 {
return r.seed;
}
};
@@ -166,7 +166,7 @@ test "empty struct method call" {
assert(es.method() == 1234);
}
const EmptyStruct = struct {
- fn method(es: &const EmptyStruct) i32 {
+ fn method(es: *const EmptyStruct) i32 {
return 1234;
}
};
@@ -228,15 +228,15 @@ test "bit field access" {
assert(data.b == 3);
}
-fn getA(data: &const BitField1) u3 {
+fn getA(data: *const BitField1) u3 {
return data.a;
}
-fn getB(data: &const BitField1) u3 {
+fn getB(data: *const BitField1) u3 {
return data.b;
}
-fn getC(data: &const BitField1) u2 {
+fn getC(data: *const BitField1) u2 {
return data.c;
}
@@ -396,8 +396,8 @@ const Bitfields = packed struct {
test "native bit field understands endianness" {
var all: u64 = 0x7765443322221111;
var bytes: [8]u8 = undefined;
- @memcpy(&bytes[0], @ptrCast(&u8, &all), 8);
- var bitfields = @ptrCast(&Bitfields, &bytes[0]).*;
+ @memcpy(&bytes[0], @ptrCast(*u8, &all), 8);
+ var bitfields = @ptrCast(*Bitfields, &bytes[0]).*;
assert(bitfields.f1 == 0x1111);
assert(bitfields.f2 == 0x2222);
@@ -415,7 +415,7 @@ test "align 1 field before self referential align 8 field as slice return type"
const Expr = union(enum) {
Literal: u8,
- Question: &Expr,
+ Question: *Expr,
};
fn alloc(comptime T: type) []T {
diff --git a/test/cases/struct_contains_null_ptr_itself.zig b/test/cases/struct_contains_null_ptr_itself.zig
index b6cb1a94cc..21175974b3 100644
--- a/test/cases/struct_contains_null_ptr_itself.zig
+++ b/test/cases/struct_contains_null_ptr_itself.zig
@@ -2,13 +2,13 @@ const std = @import("std");
const assert = std.debug.assert;
test "struct contains null pointer which contains original struct" {
- var x: ?&NodeLineComment = null;
+ var x: ?*NodeLineComment = null;
assert(x == null);
}
pub const Node = struct {
id: Id,
- comment: ?&NodeLineComment,
+ comment: ?*NodeLineComment,
pub const Id = enum {
Root,
diff --git a/test/cases/switch.zig b/test/cases/switch.zig
index 495fa9f3ed..c6a4b60f09 100644
--- a/test/cases/switch.zig
+++ b/test/cases/switch.zig
@@ -90,7 +90,7 @@ const SwitchProngWithVarEnum = union(enum) {
Two: f32,
Meh: void,
};
-fn switchProngWithVarFn(a: &const SwitchProngWithVarEnum) void {
+fn switchProngWithVarFn(a: *const SwitchProngWithVarEnum) void {
switch (a.*) {
SwitchProngWithVarEnum.One => |x| {
assert(x == 13);
diff --git a/test/cases/this.zig b/test/cases/this.zig
index 5e433b5037..ba51d0ac90 100644
--- a/test/cases/this.zig
+++ b/test/cases/this.zig
@@ -8,7 +8,7 @@ fn Point(comptime T: type) type {
x: T,
y: T,
- fn addOne(self: &Self) void {
+ fn addOne(self: *Self) void {
self.x += 1;
self.y += 1;
}
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
index 2561d70865..921ff785a7 100644
--- a/test/cases/type_info.zig
+++ b/test/cases/type_info.zig
@@ -37,7 +37,7 @@ test "type info: pointer type info" {
}
fn testPointer() void {
- const u32_ptr_info = @typeInfo(&u32);
+ const u32_ptr_info = @typeInfo(*u32);
assert(TypeId(u32_ptr_info) == TypeId.Pointer);
assert(u32_ptr_info.Pointer.is_const == false);
assert(u32_ptr_info.Pointer.is_volatile == false);
@@ -169,14 +169,14 @@ fn testUnion() void {
assert(notag_union_info.Union.fields[1].field_type == u32);
const TestExternUnion = extern union {
- foo: &c_void,
+ foo: *c_void,
};
const extern_union_info = @typeInfo(TestExternUnion);
assert(extern_union_info.Union.layout == TypeInfo.ContainerLayout.Extern);
assert(extern_union_info.Union.tag_type == @typeOf(undefined));
assert(extern_union_info.Union.fields[0].enum_field == null);
- assert(extern_union_info.Union.fields[0].field_type == &c_void);
+ assert(extern_union_info.Union.fields[0].field_type == *c_void);
}
test "type info: struct info" {
@@ -190,13 +190,13 @@ fn testStruct() void {
assert(struct_info.Struct.layout == TypeInfo.ContainerLayout.Packed);
assert(struct_info.Struct.fields.len == 3);
assert(struct_info.Struct.fields[1].offset == null);
- assert(struct_info.Struct.fields[2].field_type == &TestStruct);
+ assert(struct_info.Struct.fields[2].field_type == *TestStruct);
assert(struct_info.Struct.defs.len == 2);
assert(struct_info.Struct.defs[0].is_pub);
assert(!struct_info.Struct.defs[0].data.Fn.is_extern);
assert(struct_info.Struct.defs[0].data.Fn.lib_name == null);
assert(struct_info.Struct.defs[0].data.Fn.return_type == void);
- assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (&const TestStruct) void);
+ assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (*const TestStruct) void);
}
const TestStruct = packed struct {
@@ -204,9 +204,9 @@ const TestStruct = packed struct {
fieldA: usize,
fieldB: void,
- fieldC: &Self,
+ fieldC: *Self,
- pub fn foo(self: &const Self) void {}
+ pub fn foo(self: *const Self) void {}
};
test "type info: function type info" {
@@ -227,7 +227,7 @@ fn testFunction() void {
const test_instance: TestStruct = undefined;
const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
assert(TypeId(bound_fn_info) == TypeId.BoundFn);
- assert(bound_fn_info.BoundFn.args[0].arg_type == &const TestStruct);
+ assert(bound_fn_info.BoundFn.args[0].arg_type == *const TestStruct);
}
fn foo(comptime a: usize, b: bool, args: ...) usize {
diff --git a/test/cases/undefined.zig b/test/cases/undefined.zig
index f1af10e532..83c620d211 100644
--- a/test/cases/undefined.zig
+++ b/test/cases/undefined.zig
@@ -27,12 +27,12 @@ test "init static array to undefined" {
const Foo = struct {
x: i32,
- fn setFooXMethod(foo: &Foo) void {
+ fn setFooXMethod(foo: *Foo) void {
foo.x = 3;
}
};
-fn setFooX(foo: &Foo) void {
+fn setFooX(foo: *Foo) void {
foo.x = 2;
}
diff --git a/test/cases/union.zig b/test/cases/union.zig
index 005ad08e6a..bdcbbdb452 100644
--- a/test/cases/union.zig
+++ b/test/cases/union.zig
@@ -68,11 +68,11 @@ test "init union with runtime value" {
assert(foo.int == 42);
}
-fn setFloat(foo: &Foo, x: f64) void {
+fn setFloat(foo: *Foo, x: f64) void {
foo.* = Foo{ .float = x };
}
-fn setInt(foo: &Foo, x: i32) void {
+fn setInt(foo: *Foo, x: i32) void {
foo.* = Foo{ .int = x };
}
@@ -108,7 +108,7 @@ fn doTest() void {
assert(bar(Payload{ .A = 1234 }) == -10);
}
-fn bar(value: &const Payload) i32 {
+fn bar(value: *const Payload) i32 {
assert(Letter(value.*) == Letter.A);
return switch (value.*) {
Payload.A => |x| return x - 1244,
@@ -147,7 +147,7 @@ test "union(enum(u32)) with specified and unspecified tag values" {
comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
}
-fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void {
+fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: *const MultipleChoice2) void {
assert(u32(@TagType(MultipleChoice2)(x.*)) == 60);
assert(1123 == switch (x.*) {
MultipleChoice2.A => 1,
@@ -163,7 +163,7 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void
}
const ExternPtrOrInt = extern union {
- ptr: &u8,
+ ptr: *u8,
int: u64,
};
test "extern union size" {
@@ -171,7 +171,7 @@ test "extern union size" {
}
const PackedPtrOrInt = packed union {
- ptr: &u8,
+ ptr: *u8,
int: u64,
};
test "extern union size" {
@@ -206,7 +206,7 @@ test "cast union to tag type of union" {
comptime testCastUnionToTagType(TheUnion{ .B = 1234 });
}
-fn testCastUnionToTagType(x: &const TheUnion) void {
+fn testCastUnionToTagType(x: *const TheUnion) void {
assert(TheTag(x.*) == TheTag.B);
}
@@ -243,7 +243,7 @@ const TheUnion2 = union(enum) {
Item2: i32,
};
-fn assertIsTheUnion2Item1(value: &const TheUnion2) void {
+fn assertIsTheUnion2Item1(value: *const TheUnion2) void {
assert(value.* == TheUnion2.Item1);
}
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 0170477b8b..00ad4a709b 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -3,10 +3,10 @@ const std = @import("std");
const os = std.os;
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addC("hello world with libc",
\\const c = @cImport(@cInclude("stdio.h"));
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: **u8) c_int {
\\ _ = c.puts(c"Hello, world!");
\\ return 0;
\\}
@@ -139,7 +139,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ @cInclude("stdio.h");
\\});
\\
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: **u8) c_int {
\\ if (is_windows) {
\\ // we want actual \n, not \r\n
\\ _ = c._setmode(1, c._O_BINARY);
@@ -284,9 +284,9 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
cases.addC("expose function pointer to C land",
\\const c = @cImport(@cInclude("stdlib.h"));
\\
- \\export fn compare_fn(a: ?&const c_void, b: ?&const c_void) c_int {
- \\ const a_int = @ptrCast(&align(1) const i32, a ?? unreachable);
- \\ const b_int = @ptrCast(&align(1) const i32, b ?? unreachable);
+ \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int {
+ \\ const a_int = @ptrCast(*align(1) const i32, a ?? unreachable);
+ \\ const b_int = @ptrCast(*align(1) const i32, b ?? unreachable);
\\ if (a_int.* < b_int.*) {
\\ return -1;
\\ } else if (a_int.* > b_int.*) {
@@ -299,7 +299,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\export fn main() c_int {
\\ var array = []u32 { 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
\\
- \\ c.qsort(@ptrCast(&c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn);
+ \\ c.qsort(@ptrCast(*c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn);
\\
\\ for (array) |item, i| {
\\ if (item != i) {
@@ -324,7 +324,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ @cInclude("stdio.h");
\\});
\\
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: **u8) c_int {
\\ if (is_windows) {
\\ // we want actual \n, not \r\n
\\ _ = c._setmode(1, c._O_BINARY);
@@ -344,13 +344,13 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\const Foo = struct {
\\ field1: Bar,
\\
- \\ fn method(a: &const Foo) bool { return true; }
+ \\ fn method(a: *const Foo) bool { return true; }
\\};
\\
\\const Bar = struct {
\\ field2: i32,
\\
- \\ fn method(b: &const Bar) bool { return true; }
+ \\ fn method(b: *const Bar) bool { return true; }
\\};
\\
\\pub fn main() void {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 5215953d0a..1297ed29ab 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompileErrorContext) void {
+pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"invalid deref on switch target",
\\comptime {
@@ -109,7 +109,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"@ptrCast discards const qualifier",
\\export fn entry() void {
\\ const x: i32 = 1234;
- \\ const y = @ptrCast(&i32, &x);
+ \\ const y = @ptrCast(*i32, &x);
\\}
,
".tmp_source.zig:3:15: error: cast discards const qualifier",
@@ -118,7 +118,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"comptime slice of undefined pointer non-zero len",
\\export fn entry() void {
- \\ const slice = (&i32)(undefined)[0..1];
+ \\ const slice = (*i32)(undefined)[0..1];
\\}
,
".tmp_source.zig:2:36: error: non-zero length slice of undefined pointer",
@@ -126,7 +126,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"type checking function pointers",
- \\fn a(b: fn (&const u8) void) void {
+ \\fn a(b: fn (*const u8) void) void {
\\ b('a');
\\}
\\fn c(d: u8) void {
@@ -136,7 +136,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ a(c);
\\}
,
- ".tmp_source.zig:8:7: error: expected type 'fn(&const u8) void', found 'fn(u8) void'",
+ ".tmp_source.zig:8:7: error: expected type 'fn(*const u8) void', found 'fn(u8) void'",
);
cases.add(
@@ -594,15 +594,15 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"attempt to use 0 bit type in extern fn",
- \\extern fn foo(ptr: extern fn(&void) void) void;
+ \\extern fn foo(ptr: extern fn(*void) void) void;
\\
\\export fn entry() void {
\\ foo(bar);
\\}
\\
- \\extern fn bar(x: &void) void { }
+ \\extern fn bar(x: *void) void { }
,
- ".tmp_source.zig:7:18: error: parameter of type '&void' has 0 bits; not allowed in function with calling convention 'ccc'",
+ ".tmp_source.zig:7:18: error: parameter of type '*void' has 0 bits; not allowed in function with calling convention 'ccc'",
);
cases.add(
@@ -911,10 +911,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"pointer to noreturn",
- \\fn a() &noreturn {}
+ \\fn a() *noreturn {}
\\export fn entry() void { _ = a(); }
,
- ".tmp_source.zig:1:9: error: pointer to noreturn not allowed",
+ ".tmp_source.zig:1:8: error: pointer to noreturn not allowed",
);
cases.add(
@@ -985,7 +985,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return a;
\\}
,
- ".tmp_source.zig:3:12: error: expected type 'i32', found '&const u8'",
+ ".tmp_source.zig:3:12: error: expected type 'i32', found '*const u8'",
);
cases.add(
@@ -1446,7 +1446,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"switch expression - switch on pointer type with no else",
- \\fn foo(x: &u8) void {
+ \\fn foo(x: *u8) void {
\\ switch (x) {
\\ &y => {},
\\ }
@@ -1454,7 +1454,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const y: u8 = 100;
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:2:5: error: else prong required when switching on type '&u8'",
+ ".tmp_source.zig:2:5: error: else prong required when switching on type '*u8'",
);
cases.add(
@@ -1501,10 +1501,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"address of number literal",
\\const x = 3;
\\const y = &x;
- \\fn foo() &const i32 { return y; }
+ \\fn foo() *const i32 { return y; }
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:3:30: error: expected type '&const i32', found '&const (integer literal)'",
+ ".tmp_source.zig:3:30: error: expected type '*const i32', found '*const (integer literal)'",
);
cases.add(
@@ -1529,10 +1529,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ a: i32,
\\ b: i32,
\\
- \\ fn member_a(foo: &const Foo) i32 {
+ \\ fn member_a(foo: *const Foo) i32 {
\\ return foo.a;
\\ }
- \\ fn member_b(foo: &const Foo) i32 {
+ \\ fn member_b(foo: *const Foo) i32 {
\\ return foo.b;
\\ }
\\};
@@ -1543,7 +1543,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ Foo.member_b,
\\};
\\
- \\fn f(foo: &const Foo, index: usize) void {
+ \\fn f(foo: *const Foo, index: usize) void {
\\ const result = members[index]();
\\}
\\
@@ -1692,11 +1692,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"assign null to non-nullable pointer",
- \\const a: &u8 = null;
+ \\const a: *u8 = null;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
,
- ".tmp_source.zig:1:16: error: expected type '&u8', found '(null)'",
+ ".tmp_source.zig:1:16: error: expected type '*u8', found '(null)'",
);
cases.add(
@@ -1806,7 +1806,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ One: void,
\\ Two: i32,
\\};
- \\fn bad_eql_2(a: &const EnumWithData, b: &const EnumWithData) bool {
+ \\fn bad_eql_2(a: *const EnumWithData, b: *const EnumWithData) bool {
\\ return a.* == b.*;
\\}
\\
@@ -2011,9 +2011,9 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"wrong number of arguments for method fn call",
\\const Foo = struct {
- \\ fn method(self: &const Foo, a: i32) void {}
+ \\ fn method(self: *const Foo, a: i32) void {}
\\};
- \\fn f(foo: &const Foo) void {
+ \\fn f(foo: *const Foo) void {
\\
\\ foo.method(1, 2);
\\}
@@ -2062,7 +2062,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"misspelled type with pointer only reference",
\\const JasonHM = u8;
- \\const JasonList = &JsonNode;
+ \\const JasonList = *JsonNode;
\\
\\const JsonOA = union(enum) {
\\ JSONArray: JsonList,
@@ -2113,16 +2113,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ derp.init();
\\}
,
- ".tmp_source.zig:14:5: error: expected type 'i32', found '&const Foo'",
+ ".tmp_source.zig:14:5: error: expected type 'i32', found '*const Foo'",
);
cases.add(
"method call with first arg type wrong container",
\\pub const List = struct {
\\ len: usize,
- \\ allocator: &Allocator,
+ \\ allocator: *Allocator,
\\
- \\ pub fn init(allocator: &Allocator) List {
+ \\ pub fn init(allocator: *Allocator) List {
\\ return List {
\\ .len = 0,
\\ .allocator = allocator,
@@ -2143,7 +2143,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ x.init();
\\}
,
- ".tmp_source.zig:23:5: error: expected type '&Allocator', found '&List'",
+ ".tmp_source.zig:23:5: error: expected type '*Allocator', found '*List'",
);
cases.add(
@@ -2308,17 +2308,17 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ c: u2,
\\};
\\
- \\fn foo(bit_field: &const BitField) u3 {
+ \\fn foo(bit_field: *const BitField) u3 {
\\ return bar(&bit_field.b);
\\}
\\
- \\fn bar(x: &const u3) u3 {
+ \\fn bar(x: *const u3) u3 {
\\ return x.*;
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:8:26: error: expected type '&const u3', found '&align(1:3:6) const u3'",
+ ".tmp_source.zig:8:26: error: expected type '*const u3', found '*align(1:3:6) const u3'",
);
cases.add(
@@ -2441,13 +2441,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const b = &a;
\\ return ptrEql(b, b);
\\}
- \\fn ptrEql(a: &[]const u8, b: &[]const u8) bool {
+ \\fn ptrEql(a: *[]const u8, b: *[]const u8) bool {
\\ return true;
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:4:19: error: expected type '&[]const u8', found '&const []const u8'",
+ ".tmp_source.zig:4:19: error: expected type '*[]const u8', found '*const []const u8'",
);
cases.addCase(x: {
@@ -2493,7 +2493,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"ptrcast to non-pointer",
- \\export fn entry(a: &i32) usize {
+ \\export fn entry(a: *i32) usize {
\\ return @ptrCast(usize, a);
\\}
,
@@ -2542,16 +2542,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"int to ptr of 0 bits",
\\export fn foo() void {
\\ var x: usize = 0x1000;
- \\ var y: &void = @intToPtr(&void, x);
+ \\ var y: *void = @intToPtr(*void, x);
\\}
,
- ".tmp_source.zig:3:31: error: type '&void' has 0 bits and cannot store information",
+ ".tmp_source.zig:3:30: error: type '*void' has 0 bits and cannot store information",
);
cases.add(
"@fieldParentPtr - non struct",
\\const Foo = i32;
- \\export fn foo(a: &i32) &Foo {
+ \\export fn foo(a: *i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
,
@@ -2563,7 +2563,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const Foo = extern struct {
\\ derp: i32,
\\};
- \\export fn foo(a: &i32) &Foo {
+ \\export fn foo(a: *i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
,
@@ -2575,7 +2575,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const Foo = extern struct {
\\ a: i32,
\\};
- \\export fn foo(a: i32) &Foo {
+ \\export fn foo(a: i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
,
@@ -2591,7 +2591,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const foo = Foo { .a = 1, .b = 2, };
\\
\\comptime {
- \\ const field_ptr = @intToPtr(&i32, 0x1234);
+ \\ const field_ptr = @intToPtr(*i32, 0x1234);
\\ const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr);
\\}
,
@@ -2682,7 +2682,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"returning address of local variable - simple",
- \\export fn foo() &i32 {
+ \\export fn foo() *i32 {
\\ var a: i32 = undefined;
\\ return &a;
\\}
@@ -2692,7 +2692,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"returning address of local variable - phi",
- \\export fn foo(c: bool) &i32 {
+ \\export fn foo(c: bool) *i32 {
\\ var a: i32 = undefined;
\\ var b: i32 = undefined;
\\ return if (c) &a else &b;
@@ -3086,11 +3086,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ bar(&foo.b);
\\}
\\
- \\fn bar(x: &u32) void {
+ \\fn bar(x: *u32) void {
\\ x.* += 1;
\\}
,
- ".tmp_source.zig:8:13: error: expected type '&u32', found '&align(1) u32'",
+ ".tmp_source.zig:8:13: error: expected type '*u32', found '*align(1) u32'",
);
cases.add(
@@ -3117,13 +3117,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"increase pointer alignment in @ptrCast",
\\export fn entry() u32 {
\\ var bytes: [4]u8 = []u8{0x01, 0x02, 0x03, 0x04};
- \\ const ptr = @ptrCast(&u32, &bytes[0]);
+ \\ const ptr = @ptrCast(*u32, &bytes[0]);
\\ return ptr.*;
\\}
,
".tmp_source.zig:3:17: error: cast increases pointer alignment",
- ".tmp_source.zig:3:38: note: '&u8' has alignment 1",
- ".tmp_source.zig:3:27: note: '&u32' has alignment 4",
+ ".tmp_source.zig:3:38: note: '*u8' has alignment 1",
+ ".tmp_source.zig:3:26: note: '*u32' has alignment 4",
);
cases.add(
@@ -3169,7 +3169,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return x == 5678;
\\}
,
- ".tmp_source.zig:4:32: error: expected type '&i32', found '&align(1) i32'",
+ ".tmp_source.zig:4:32: error: expected type '*i32', found '*align(1) i32'",
);
cases.add(
@@ -3198,20 +3198,20 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"wrong pointer implicitly casted to pointer to @OpaqueType()",
\\const Derp = @OpaqueType();
- \\extern fn bar(d: &Derp) void;
+ \\extern fn bar(d: *Derp) void;
\\export fn foo() void {
\\ var x = u8(1);
- \\ bar(@ptrCast(&c_void, &x));
+ \\ bar(@ptrCast(*c_void, &x));
\\}
,
- ".tmp_source.zig:5:9: error: expected type '&Derp', found '&c_void'",
+ ".tmp_source.zig:5:9: error: expected type '*Derp', found '*c_void'",
);
cases.add(
"non-const variables of things that require const variables",
\\const Opaque = @OpaqueType();
\\
- \\export fn entry(opaque: &Opaque) void {
+ \\export fn entry(opaque: *Opaque) void {
\\ var m2 = &2;
\\ const y: u32 = m2.*;
\\
@@ -3229,10 +3229,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\const Foo = struct {
- \\ fn bar(self: &const Foo) void {}
+ \\ fn bar(self: *const Foo) void {}
\\};
,
- ".tmp_source.zig:4:4: error: variable of type '&const (integer literal)' must be const or comptime",
+ ".tmp_source.zig:4:4: error: variable of type '*const (integer literal)' must be const or comptime",
".tmp_source.zig:7:4: error: variable of type '(undefined)' must be const or comptime",
".tmp_source.zig:8:4: error: variable of type '(integer literal)' must be const or comptime",
".tmp_source.zig:9:4: error: variable of type '(float literal)' must be const or comptime",
@@ -3241,7 +3241,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime",
".tmp_source.zig:13:4: error: variable of type 'type' must be const or comptime",
".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime",
- ".tmp_source.zig:15:4: error: variable of type '(bound fn(&const Foo) void)' must be const or comptime",
+ ".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime",
".tmp_source.zig:17:4: error: unreachable code",
);
@@ -3397,14 +3397,14 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() bool {
\\ var x: i32 = 1;
- \\ return bar(@ptrCast(&MyType, &x));
+ \\ return bar(@ptrCast(*MyType, &x));
\\}
\\
- \\fn bar(x: &MyType) bool {
+ \\fn bar(x: *MyType) bool {
\\ return x.blah;
\\}
,
- ".tmp_source.zig:9:13: error: type '&MyType' does not support field access",
+ ".tmp_source.zig:9:13: error: type '*MyType' does not support field access",
);
cases.add(
@@ -3535,9 +3535,9 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\export fn entry() void {
\\ foo("hello",);
\\}
- \\pub extern fn foo(format: &const u8, ...) void;
+ \\pub extern fn foo(format: *const u8, ...) void;
,
- ".tmp_source.zig:2:9: error: expected type '&const u8', found '[5]u8'",
+ ".tmp_source.zig:2:9: error: expected type '*const u8', found '[5]u8'",
);
cases.add(
@@ -3902,7 +3902,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const a = Payload { .A = 1234 };
\\ foo(a);
\\}
- \\fn foo(a: &const Payload) void {
+ \\fn foo(a: *const Payload) void {
\\ switch (a.*) {
\\ Payload.A => {},
\\ else => unreachable,
diff --git a/test/gen_h.zig b/test/gen_h.zig
index 2def39bed7..9559c3395c 100644
--- a/test/gen_h.zig
+++ b/test/gen_h.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.GenHContext) void {
+pub fn addCases(cases: *tests.GenHContext) void {
cases.add("declare enum",
\\const Foo = extern enum { A, B, C };
\\export fn entry(foo: Foo) void { }
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index 1fea6347ab..71d1d68764 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addRuntimeSafety("calling panic",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig
index 7752f599df..64f3c08583 100644
--- a/test/standalone/brace_expansion/build.zig
+++ b/test/standalone/brace_expansion/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const main = b.addTest("main.zig");
main.setBuildMode(b.standardReleaseOptions());
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index c96cc2cbb9..ccb4f6dd45 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -14,7 +14,7 @@ const Token = union(enum) {
Eof,
};
-var global_allocator: &mem.Allocator = undefined;
+var global_allocator: *mem.Allocator = undefined;
fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
@@ -73,7 +73,7 @@ const ParseError = error{
OutOfMemory,
};
-fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node {
+fn parse(tokens: *const ArrayList(Token), token_index: *usize) ParseError!Node {
const first_token = tokens.items[token_index.*];
token_index.* += 1;
@@ -109,7 +109,7 @@ fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node {
}
}
-fn expandString(input: []const u8, output: &Buffer) !void {
+fn expandString(input: []const u8, output: *Buffer) !void {
const tokens = try tokenize(input);
if (tokens.len == 1) {
return output.resize(0);
@@ -139,7 +139,7 @@ fn expandString(input: []const u8, output: &Buffer) !void {
const ExpandNodeError = error{OutOfMemory};
-fn expandNode(node: &const Node, output: &ArrayList(Buffer)) ExpandNodeError!void {
+fn expandNode(node: *const Node, output: *ArrayList(Buffer)) ExpandNodeError!void {
assert(output.len == 0);
switch (node.*) {
Node.Scalar => |scalar| {
diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig
index f3ab327006..733b3729c1 100644
--- a/test/standalone/issue_339/build.zig
+++ b/test/standalone/issue_339/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("test", "test.zig");
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_339/test.zig b/test/standalone/issue_339/test.zig
index da0747b8e6..f4068dcfac 100644
--- a/test/standalone/issue_339/test.zig
+++ b/test/standalone/issue_339/test.zig
@@ -1,5 +1,5 @@
const StackTrace = @import("builtin").StackTrace;
-pub fn panic(msg: []const u8, stack_trace: ?&StackTrace) noreturn {
+pub fn panic(msg: []const u8, stack_trace: ?*StackTrace) noreturn {
@breakpoint();
while (true) {}
}
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index 4f5dcd7ff4..06c37a83a3 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const test_artifact = b.addTest("main.zig");
test_artifact.addIncludeDir("a_directory");
diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig
index bb9416d3c4..e0b3885dc3 100644
--- a/test/standalone/pkg_import/build.zig
+++ b/test/standalone/pkg_import/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const exe = b.addExecutable("test", "test.zig");
exe.addPackagePath("my_pkg", "pkg.zig");
diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig
index ecbba297d8..c700d43db9 100644
--- a/test/standalone/use_alias/build.zig
+++ b/test/standalone/use_alias/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
b.addCIncludePath(".");
const main = b.addTest("main.zig");
diff --git a/test/tests.zig b/test/tests.zig
index b59b954122..cc562331fe 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -47,7 +47,7 @@ const test_targets = []TestTarget{
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
-pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
@@ -61,7 +61,7 @@ pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build
return cases.step;
}
-pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
@@ -75,7 +75,7 @@ pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) &build
return cases.step;
}
-pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompileErrorContext) catch unreachable;
cases.* = CompileErrorContext{
.b = b,
@@ -89,7 +89,7 @@ pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build.
return cases.step;
}
-pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(BuildExamplesContext) catch unreachable;
cases.* = BuildExamplesContext{
.b = b,
@@ -103,7 +103,7 @@ pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build.
return cases.step;
}
-pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
@@ -117,7 +117,7 @@ pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &bui
return cases.step;
}
-pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(TranslateCContext) catch unreachable;
cases.* = TranslateCContext{
.b = b,
@@ -131,7 +131,7 @@ pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.St
return cases.step;
}
-pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(GenHContext) catch unreachable;
cases.* = GenHContext{
.b = b,
@@ -145,7 +145,7 @@ pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
return cases.step;
}
-pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) &build.Step {
+pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) *build.Step {
const step = b.step(b.fmt("test-{}", name), desc);
for (test_targets) |test_target| {
const is_native = (test_target.os == builtin.os and test_target.arch == builtin.arch);
@@ -193,8 +193,8 @@ pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []cons
}
pub const CompareOutputContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -217,28 +217,28 @@ pub const CompareOutputContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn setCommandLineArgs(self: &TestCase, args: []const []const u8) void {
+ pub fn setCommandLineArgs(self: *TestCase, args: []const []const u8) void {
self.cli_args = args;
}
};
const RunCompareOutputStep = struct {
step: build.Step,
- context: &CompareOutputContext,
+ context: *CompareOutputContext,
exe_path: []const u8,
name: []const u8,
expected_output: []const u8,
test_index: usize,
cli_args: []const []const u8,
- pub fn create(context: &CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) &RunCompareOutputStep {
+ pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RunCompareOutputStep) catch unreachable;
ptr.* = RunCompareOutputStep{
@@ -254,7 +254,7 @@ pub const CompareOutputContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(RunCompareOutputStep, "step", step);
const b = self.context.b;
@@ -321,12 +321,12 @@ pub const CompareOutputContext = struct {
const RuntimeSafetyRunStep = struct {
step: build.Step,
- context: &CompareOutputContext,
+ context: *CompareOutputContext,
exe_path: []const u8,
name: []const u8,
test_index: usize,
- pub fn create(context: &CompareOutputContext, exe_path: []const u8, name: []const u8) &RuntimeSafetyRunStep {
+ pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
ptr.* = RuntimeSafetyRunStep{
@@ -340,7 +340,7 @@ pub const CompareOutputContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(RuntimeSafetyRunStep, "step", step);
const b = self.context.b;
@@ -382,7 +382,7 @@ pub const CompareOutputContext = struct {
}
};
- pub fn createExtra(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
+ pub fn createExtra(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
var tc = TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
@@ -396,32 +396,32 @@ pub const CompareOutputContext = struct {
return tc;
}
- pub fn create(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
+ pub fn create(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
return createExtra(self, name, source, expected_output, Special.None);
}
- pub fn addC(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn addC(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
var tc = self.create(name, source, expected_output);
tc.link_libc = true;
self.addCase(tc);
}
- pub fn add(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn add(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
const tc = self.create(name, source, expected_output);
self.addCase(tc);
}
- pub fn addAsm(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn addAsm(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
const tc = self.createExtra(name, source, expected_output, Special.Asm);
self.addCase(tc);
}
- pub fn addRuntimeSafety(self: &CompareOutputContext, name: []const u8, source: []const u8) void {
+ pub fn addRuntimeSafety(self: *CompareOutputContext, name: []const u8, source: []const u8) void {
const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety);
self.addCase(tc);
}
- pub fn addCase(self: &CompareOutputContext, case: &const TestCase) void {
+ pub fn addCase(self: *CompareOutputContext, case: *const TestCase) void {
const b = self.b;
const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable;
@@ -504,8 +504,8 @@ pub const CompareOutputContext = struct {
};
pub const CompileErrorContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -521,27 +521,27 @@ pub const CompileErrorContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedError(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedError(self: *TestCase, text: []const u8) void {
self.expected_errors.append(text) catch unreachable;
}
};
const CompileCmpOutputStep = struct {
step: build.Step,
- context: &CompileErrorContext,
+ context: *CompileErrorContext,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
build_mode: Mode,
- pub fn create(context: &CompileErrorContext, name: []const u8, case: &const TestCase, build_mode: Mode) &CompileCmpOutputStep {
+ pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(CompileCmpOutputStep) catch unreachable;
ptr.* = CompileCmpOutputStep{
@@ -556,7 +556,7 @@ pub const CompileErrorContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(CompileCmpOutputStep, "step", step);
const b = self.context.b;
@@ -661,7 +661,7 @@ pub const CompileErrorContext = struct {
warn("\n");
}
- pub fn create(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) &TestCase {
+ pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
@@ -678,24 +678,24 @@ pub const CompileErrorContext = struct {
return tc;
}
- pub fn addC(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addC(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
var tc = self.create(name, source, expected_lines);
tc.link_libc = true;
self.addCase(tc);
}
- pub fn addExe(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addExe(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
var tc = self.create(name, source, expected_lines);
tc.is_exe = true;
self.addCase(tc);
}
- pub fn add(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &CompileErrorContext, case: &const TestCase) void {
+ pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void {
const b = self.b;
for ([]Mode{
@@ -720,20 +720,20 @@ pub const CompileErrorContext = struct {
};
pub const BuildExamplesContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
- pub fn addC(self: &BuildExamplesContext, root_src: []const u8) void {
+ pub fn addC(self: *BuildExamplesContext, root_src: []const u8) void {
self.addAllArgs(root_src, true);
}
- pub fn add(self: &BuildExamplesContext, root_src: []const u8) void {
+ pub fn add(self: *BuildExamplesContext, root_src: []const u8) void {
self.addAllArgs(root_src, false);
}
- pub fn addBuildFile(self: &BuildExamplesContext, build_file: []const u8) void {
+ pub fn addBuildFile(self: *BuildExamplesContext, build_file: []const u8) void {
const b = self.b;
const annotated_case_name = b.fmt("build {} (Debug)", build_file);
@@ -763,7 +763,7 @@ pub const BuildExamplesContext = struct {
self.step.dependOn(&log_step.step);
}
- pub fn addAllArgs(self: &BuildExamplesContext, root_src: []const u8, link_libc: bool) void {
+ pub fn addAllArgs(self: *BuildExamplesContext, root_src: []const u8, link_libc: bool) void {
const b = self.b;
for ([]Mode{
@@ -792,8 +792,8 @@ pub const BuildExamplesContext = struct {
};
pub const TranslateCContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -808,26 +808,26 @@ pub const TranslateCContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedLine(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
self.expected_lines.append(text) catch unreachable;
}
};
const TranslateCCmpOutputStep = struct {
step: build.Step,
- context: &TranslateCContext,
+ context: *TranslateCContext,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
- pub fn create(context: &TranslateCContext, name: []const u8, case: &const TestCase) &TranslateCCmpOutputStep {
+ pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable;
ptr.* = TranslateCCmpOutputStep{
@@ -841,7 +841,7 @@ pub const TranslateCContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(TranslateCCmpOutputStep, "step", step);
const b = self.context.b;
@@ -935,7 +935,7 @@ pub const TranslateCContext = struct {
warn("\n");
}
- pub fn create(self: &TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) &TestCase {
+ pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
@@ -951,22 +951,22 @@ pub const TranslateCContext = struct {
return tc;
}
- pub fn add(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(false, "source.h", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addC(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addC(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(false, "source.c", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addAllowWarnings(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addAllowWarnings(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(true, "source.h", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &TranslateCContext, case: &const TestCase) void {
+ pub fn addCase(self: *TranslateCContext, case: *const TestCase) void {
const b = self.b;
const annotated_case_name = fmt.allocPrint(self.b.allocator, "translate-c {}", case.name) catch unreachable;
@@ -986,8 +986,8 @@ pub const TranslateCContext = struct {
};
pub const GenHContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -1001,27 +1001,27 @@ pub const GenHContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedLine(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
self.expected_lines.append(text) catch unreachable;
}
};
const GenHCmpOutputStep = struct {
step: build.Step,
- context: &GenHContext,
+ context: *GenHContext,
h_path: []const u8,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
- pub fn create(context: &GenHContext, h_path: []const u8, name: []const u8, case: &const TestCase) &GenHCmpOutputStep {
+ pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
ptr.* = GenHCmpOutputStep{
@@ -1036,7 +1036,7 @@ pub const GenHContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(GenHCmpOutputStep, "step", step);
const b = self.context.b;
@@ -1069,7 +1069,7 @@ pub const GenHContext = struct {
warn("\n");
}
- pub fn create(self: &GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) &TestCase {
+ pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
@@ -1084,12 +1084,12 @@ pub const GenHContext = struct {
return tc;
}
- pub fn add(self: &GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create("test.zig", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &GenHContext, case: &const TestCase) void {
+ pub fn addCase(self: *GenHContext, case: *const TestCase) void {
const b = self.b;
const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable;
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 4cf1e047fa..9a07bc343d 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.TranslateCContext) void {
+pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("double define struct",
\\typedef struct Bar Bar;
\\typedef struct Foo Foo;
@@ -14,11 +14,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Foo = extern struct {
- \\ a: ?&Foo,
+ \\ a: ?*Foo,
\\};
\\pub const Foo = struct_Foo;
\\pub const struct_Bar = extern struct {
- \\ a: ?&Foo,
+ \\ a: ?*Foo,
\\};
);
@@ -99,7 +99,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("restrict -> noalias",
\\void foo(void *restrict bar, void *restrict);
,
- \\pub extern fn foo(noalias bar: ?&c_void, noalias arg1: ?&c_void) void;
+ \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void;
);
cases.add("simple struct",
@@ -110,7 +110,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\const struct_Foo = extern struct {
\\ x: c_int,
- \\ y: ?&u8,
+ \\ y: ?*u8,
\\};
,
\\pub const Foo = struct_Foo;
@@ -141,7 +141,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const BarB = enum_Bar.B;
,
- \\pub extern fn func(a: ?&struct_Foo, b: ?&(?&enum_Bar)) void;
+ \\pub extern fn func(a: ?*struct_Foo, b: ?*(?*enum_Bar)) void;
,
\\pub const Foo = struct_Foo;
,
@@ -151,7 +151,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("constant size array",
\\void func(int array[20]);
,
- \\pub extern fn func(array: ?&c_int) void;
+ \\pub extern fn func(array: ?*c_int) void;
);
cases.add("self referential struct with function pointer",
@@ -160,7 +160,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Foo = extern struct {
- \\ derp: ?extern fn(?&struct_Foo) void,
+ \\ derp: ?extern fn(?*struct_Foo) void,
\\};
,
\\pub const Foo = struct_Foo;
@@ -172,7 +172,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const struct_Foo = @OpaqueType();
,
- \\pub extern fn some_func(foo: ?&struct_Foo, x: c_int) ?&struct_Foo;
+ \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo;
,
\\pub const Foo = struct_Foo;
);
@@ -219,11 +219,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Bar = extern struct {
- \\ next: ?&struct_Foo,
+ \\ next: ?*struct_Foo,
\\};
,
\\pub const struct_Foo = extern struct {
- \\ next: ?&struct_Bar,
+ \\ next: ?*struct_Bar,
\\};
);
@@ -233,7 +233,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const Foo = c_void;
,
- \\pub extern fn fun(a: ?&Foo) Foo;
+ \\pub extern fn fun(a: ?*Foo) Foo;
);
cases.add("generate inline func for #define global extern fn",
@@ -505,7 +505,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 6;
\\}
,
- \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ if ((a != 0) and (b != 0)) return 0;
\\ if ((b != 0) and (c != null)) return 1;
\\ if ((a != 0) and (c != null)) return 2;
@@ -607,7 +607,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub const struct_Foo = extern struct {
\\ field: c_int,
\\};
- \\pub export fn read_field(foo: ?&struct_Foo) c_int {
+ \\pub export fn read_field(foo: ?*struct_Foo) c_int {
\\ return (??foo).field;
\\}
);
@@ -653,8 +653,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return x;
\\}
,
- \\pub export fn foo(x: ?&c_ushort) ?&c_void {
- \\ return @ptrCast(?&c_void, x);
+ \\pub export fn foo(x: ?*c_ushort) ?*c_void {
+ \\ return @ptrCast(?*c_void, x);
\\}
);
@@ -674,7 +674,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 0;
\\}
,
- \\pub export fn foo() ?&c_int {
+ \\pub export fn foo() ?*c_int {
\\ return null;
\\}
);
@@ -983,7 +983,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ *x = 1;
\\}
,
- \\pub export fn foo(x: ?&c_int) void {
+ \\pub export fn foo(x: ?*c_int) void {
\\ (??x).* = 1;
\\}
);
@@ -1011,7 +1011,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub fn foo() c_int {
\\ var x: c_int = 1234;
- \\ var ptr: ?&c_int = &x;
+ \\ var ptr: ?*c_int = &x;
\\ return (??ptr).*;
\\}
);
@@ -1021,7 +1021,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return "bar";
\\}
,
- \\pub fn foo() ?&const u8 {
+ \\pub fn foo() ?*const u8 {
\\ return c"bar";
\\}
);
@@ -1150,8 +1150,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return (float *)a;
\\}
,
- \\fn ptrcast(a: ?&c_int) ?&f32 {
- \\ return @ptrCast(?&f32, a);
+ \\fn ptrcast(a: ?*c_int) ?*f32 {
+ \\ return @ptrCast(?*f32, a);
\\}
);
@@ -1173,7 +1173,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return !c;
\\}
,
- \\pub fn foo(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int {
\\ return !(a == 0);
\\ return !(a != 0);
\\ return !(b != 0);
@@ -1194,7 +1194,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("const ptr initializer",
\\static const char *v0 = "0.0.0";
,
- \\pub var v0: ?&const u8 = c"0.0.0";
+ \\pub var v0: ?*const u8 = c"0.0.0";
);
cases.add("static incomplete array inside function",
@@ -1203,14 +1203,14 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\}
,
\\pub fn foo() void {
- \\ const v2: &const u8 = c"2.2.2";
+ \\ const v2: *const u8 = c"2.2.2";
\\}
);
cases.add("macro pointer cast",
\\#define NRF_GPIO ((NRF_GPIO_Type *) NRF_GPIO_BASE)
,
- \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(&NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(&NRF_GPIO_Type, NRF_GPIO_BASE) else (&NRF_GPIO_Type)(NRF_GPIO_BASE);
+ \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(*NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(*NRF_GPIO_Type, NRF_GPIO_BASE) else (*NRF_GPIO_Type)(NRF_GPIO_BASE);
);
cases.add("if on none bool",
@@ -1231,7 +1231,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ B,
\\ C,
\\};
- \\pub fn if_none_bool(a: c_int, b: f32, c: ?&c_void, d: enum_SomeEnum) c_int {
+ \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int {
\\ if (a != 0) return 0;
\\ if (b != 0) return 1;
\\ if (c != null) return 2;
@@ -1248,7 +1248,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn while_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
@@ -1264,7 +1264,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn for_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
--
cgit v1.2.3
From 77678b2cbc7ac9ba2d5d4725241f6a9f7ac64fa4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 10 Jun 2018 01:13:51 -0400
Subject: breaking syntax change: orelse keyword instead of ?? (#1096)
use the `zig-fmt-optional-default` branch to have zig fmt
automatically do the changes.
closes #1023
---
build.zig | 6 +++---
doc/docgen.zig | 6 +++---
doc/langref.html.in | 16 +++++++--------
src-self-hosted/main.zig | 14 ++++++-------
src-self-hosted/module.zig | 8 ++++----
src/all_types.hpp | 7 ++++++-
src/analyze.cpp | 1 +
src/ast_render.cpp | 12 +++++++++--
src/ir.cpp | 31 ++++++++++++-----------------
src/parser.cpp | 13 ++++++------
src/tokenizer.cpp | 27 ++++++-------------------
src/tokenizer.hpp | 2 +-
src/translate_c.cpp | 16 ++++++++-------
std/atomic/queue.zig | 4 ++--
std/atomic/stack.zig | 4 ++--
std/buf_map.zig | 6 +++---
std/buf_set.zig | 4 ++--
std/build.zig | 24 +++++++++++-----------
std/debug/index.zig | 20 +++++++++----------
std/heap.zig | 10 +++++-----
std/linked_list.zig | 4 ++--
std/os/index.zig | 14 ++++++-------
std/os/linux/vdso.zig | 8 ++++----
std/os/path.zig | 12 +++++------
std/os/windows/util.zig | 2 +-
std/special/build_runner.zig | 10 +++++-----
std/unicode.zig | 2 +-
std/zig/parse.zig | 47 ++++++++++++++++++++++----------------------
std/zig/render.zig | 8 ++++----
test/cases/cast.zig | 6 +++---
test/cases/null.zig | 10 +++++-----
test/compile_errors.zig | 2 +-
test/translate_c.zig | 20 +++++++++----------
33 files changed, 187 insertions(+), 189 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/build.zig b/build.zig
index eada37816c..fd154c7504 100644
--- a/build.zig
+++ b/build.zig
@@ -102,11 +102,11 @@ pub fn build(b: *Builder) !void {
b.default_step.dependOn(&exe.step);
- const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") ?? false;
+ const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false;
if (!skip_self_hosted) {
test_step.dependOn(&exe.step);
}
- const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") ?? false;
+ const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") orelse false;
exe.setVerboseLink(verbose_link_exe);
b.installArtifact(exe);
@@ -114,7 +114,7 @@ pub fn build(b: *Builder) !void {
installCHeaders(b, c_header_files);
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
- const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") ?? false;
+ const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") orelse false;
test_step.dependOn(docs_step);
diff --git a/doc/docgen.zig b/doc/docgen.zig
index ed0e1be273..3283d146b0 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -25,13 +25,13 @@ pub fn main() !void {
if (!args_it.skip()) @panic("expected self arg");
- const zig_exe = try (args_it.next(allocator) ?? @panic("expected zig exe arg"));
+ const zig_exe = try (args_it.next(allocator) orelse @panic("expected zig exe arg"));
defer allocator.free(zig_exe);
- const in_file_name = try (args_it.next(allocator) ?? @panic("expected input arg"));
+ const in_file_name = try (args_it.next(allocator) orelse @panic("expected input arg"));
defer allocator.free(in_file_name);
- const out_file_name = try (args_it.next(allocator) ?? @panic("expected output arg"));
+ const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg"));
defer allocator.free(out_file_name);
var in_file = try os.File.openRead(allocator, in_file_name);
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 4c4a637095..0ada8a5196 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -985,7 +985,7 @@ a ^= b
- a ?? b
|
+ a orelse b
|
- {#link|Optionals#}
@@ -998,7 +998,7 @@ a ^= b |
const value: ?u32 = null;
-const unwrapped = value ?? 1234;
+const unwrapped = value orelse 1234;
unwrapped == 1234
|
@@ -1011,7 +1011,7 @@ unwrapped == 1234
Equivalent to:
- a ?? unreachable
+ a orelse unreachable
|
const value: ?u32 = 5678;
@@ -1278,7 +1278,7 @@ x{} x.* x.?
== != < > <= >=
and
or
-?? catch
+orelse catch
= *= /= %= += -= <<= >>= &= ^= |=
{#header_close#}
{#header_close#}
@@ -3062,7 +3062,7 @@ fn createFoo(param: i32) !Foo {
// but we want to return it if the function succeeds.
errdefer deallocateFoo(foo);
- const tmp_buf = allocateTmpBuffer() ?? return error.OutOfMemory;
+ const tmp_buf = allocateTmpBuffer() orelse return error.OutOfMemory;
// tmp_buf is truly a temporary resource, and we for sure want to clean it up
// before this block leaves scope
defer deallocateTmpBuffer(tmp_buf);
@@ -3219,13 +3219,13 @@ struct Foo *do_a_thing(void) {
extern fn malloc(size: size_t) ?*u8;
fn doAThing() ?*Foo {
- const ptr = malloc(1234) ?? return null;
+ const ptr = malloc(1234) orelse return null;
// ...
}
{#code_end#}
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
- is *u8 not ?*u8. The ?? operator
+ is *u8 not ?*u8. The orelse keyword
unwrapped the optional type and therefore ptr is guaranteed to be non-null everywhere
it is used in the function.
@@ -5941,7 +5941,7 @@ AsmClobbers= ":" list(String, ",")
UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapOptional = "??" Expression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 64734f077a..1c91ab9cbe 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -212,7 +212,7 @@ fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
defer allocator.free(build_runner_path);
- const build_file = flags.single("build-file") ?? "build.zig";
+ const build_file = flags.single("build-file") orelse "build.zig";
const build_file_abs = try os.path.resolve(allocator, ".", build_file);
defer allocator.free(build_file_abs);
@@ -516,7 +516,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const basename = os.path.basename(in_file.?);
var it = mem.split(basename, ".");
- const root_name = it.next() ?? {
+ const root_name = it.next() orelse {
try stderr.write("file name cannot be empty\n");
os.exit(1);
};
@@ -535,7 +535,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const zig_root_source_file = in_file;
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") ?? "zig-cache"[0..]) catch {
+ const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch {
os.exit(1);
};
defer allocator.free(full_cache_dir);
@@ -555,9 +555,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
);
defer module.destroy();
- module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") ?? "0", 10);
- module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") ?? "0", 10);
- module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") ?? "0", 10);
+ module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
+ module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
+ module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
module.is_test = false;
@@ -652,7 +652,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
try module.build();
- try module.link(flags.single("out-file") ?? null);
+ try module.link(flags.single("out-file") orelse null);
if (flags.present("print-timing-info")) {
// codegen_print_timing_info(g, stderr);
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index a7ddf3f9e9..575105f25f 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -130,13 +130,13 @@ pub const Module = struct {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
- const context = c.LLVMContextCreate() ?? return error.OutOfMemory;
+ const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
errdefer c.LLVMContextDispose(context);
- const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) ?? return error.OutOfMemory;
+ const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeModule(module);
- const builder = c.LLVMCreateBuilderInContext(context) ?? return error.OutOfMemory;
+ const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
const module_ptr = try allocator.create(Module);
@@ -223,7 +223,7 @@ pub const Module = struct {
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
- const root_src_path = self.root_src_path ?? @panic("TODO handle null root src path");
+ const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 2a5a0ad740..ab219e4e56 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -387,6 +387,7 @@ enum NodeType {
NodeTypeSliceExpr,
NodeTypeFieldAccessExpr,
NodeTypePtrDeref,
+ NodeTypeUnwrapOptional,
NodeTypeUse,
NodeTypeBoolLiteral,
NodeTypeNullLiteral,
@@ -575,6 +576,10 @@ struct AstNodeCatchExpr {
AstNode *op2;
};
+struct AstNodeUnwrapOptional {
+ AstNode *expr;
+};
+
enum CastOp {
CastOpNoCast, // signifies the function call expression is not a cast
CastOpNoop, // fn call expr is a cast, but does nothing
@@ -624,7 +629,6 @@ enum PrefixOp {
PrefixOpNegation,
PrefixOpNegationWrap,
PrefixOpOptional,
- PrefixOpUnwrapOptional,
PrefixOpAddrOf,
};
@@ -909,6 +913,7 @@ struct AstNode {
AstNodeTestDecl test_decl;
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
+ AstNodeUnwrapOptional unwrap_optional;
AstNodePrefixOpExpr prefix_op_expr;
AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index ed261148ea..0aa5ea5dcb 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3308,6 +3308,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeAsmExpr:
case NodeTypeFieldAccessExpr:
case NodeTypePtrDeref:
+ case NodeTypeUnwrapOptional:
case NodeTypeStructField:
case NodeTypeContainerInitExpr:
case NodeTypeStructValueField:
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 2c8c03b226..2ace00885d 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) {
case BinOpTypeAssignBitXor: return "^=";
case BinOpTypeAssignBitOr: return "|=";
case BinOpTypeAssignMergeErrorSets: return "||=";
- case BinOpTypeUnwrapOptional: return "??";
+ case BinOpTypeUnwrapOptional: return "orelse";
case BinOpTypeArrayCat: return "++";
case BinOpTypeArrayMult: return "**";
case BinOpTypeErrorUnion: return "!";
@@ -67,7 +67,6 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpBoolNot: return "!";
case PrefixOpBinNot: return "~";
case PrefixOpOptional: return "?";
- case PrefixOpUnwrapOptional: return "??";
case PrefixOpAddrOf: return "&";
}
zig_unreachable();
@@ -222,6 +221,8 @@ static const char *node_type_str(NodeType node_type) {
return "FieldAccessExpr";
case NodeTypePtrDeref:
return "PtrDerefExpr";
+ case NodeTypeUnwrapOptional:
+ return "UnwrapOptional";
case NodeTypeContainerDecl:
return "ContainerDecl";
case NodeTypeStructField:
@@ -711,6 +712,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, ".*");
break;
}
+ case NodeTypeUnwrapOptional:
+ {
+ AstNode *lhs = node->data.unwrap_optional.expr;
+ render_node_ungrouped(ar, lhs);
+ fprintf(ar->f, ".?");
+ break;
+ }
case NodeTypeUndefinedLiteral:
fprintf(ar->f, "undefined");
break;
diff --git a/src/ir.cpp b/src/ir.cpp
index 02606fc4aa..96eb5f7434 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -4661,21 +4661,6 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode
return ir_build_load_ptr(irb, scope, source_node, payload_ptr);
}
-static IrInstruction *ir_gen_maybe_assert_ok(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
- assert(node->type == NodeTypePrefixOpExpr);
- AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
-
- IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
- if (maybe_ptr == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
- if (lval.is_ptr)
- return unwrapped_ptr;
-
- return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
-}
-
static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePrefixOpExpr);
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
@@ -4705,8 +4690,6 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval);
case PrefixOpOptional:
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval);
- case PrefixOpUnwrapOptional:
- return ir_gen_maybe_assert_ok(irb, scope, node, lval);
case PrefixOpAddrOf: {
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
@@ -6541,7 +6524,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
case NodeTypePtrDeref: {
- assert(node->type == NodeTypePtrDeref);
AstNode *expr_node = node->data.ptr_deref_expr.target;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
@@ -6549,6 +6531,19 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
}
+ case NodeTypeUnwrapOptional: {
+ AstNode *expr_node = node->data.unwrap_optional.expr;
+
+ IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+ if (maybe_ptr == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
+
+ IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
+ if (lval.is_ptr)
+ return unwrapped_ptr;
+
+ return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
+ }
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
diff --git a/src/parser.cpp b/src/parser.cpp
index 2ee69f81ab..adb1633f5d 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1151,9 +1151,8 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index,
} else if (token->id == TokenIdQuestion) {
*token_index += 1;
- AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, first_token);
- node->data.prefix_op_expr.prefix_op = PrefixOpUnwrapOptional;
- node->data.prefix_op_expr.primary_expr = primary_expr;
+ AstNode *node = ast_create_node(pc, NodeTypeUnwrapOptional, first_token);
+ node->data.unwrap_optional.expr = primary_expr;
primary_expr = node;
} else {
@@ -1173,7 +1172,6 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdMinusPercent: return PrefixOpNegationWrap;
case TokenIdTilde: return PrefixOpBinNot;
case TokenIdQuestion: return PrefixOpOptional;
- case TokenIdDoubleQuestion: return PrefixOpUnwrapOptional;
case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
@@ -2312,7 +2310,7 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma
/*
UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapOptional : "??" BoolOrExpression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
*/
static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -2322,7 +2320,7 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdDoubleQuestion) {
+ if (token->id == TokenIdKeywordOrElse) {
*token_index += 1;
AstNode *rhs = ast_parse_expression(pc, token_index, true);
@@ -3035,6 +3033,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypePtrDeref:
visit_field(&node->data.ptr_deref_expr.target, visit, context);
break;
+ case NodeTypeUnwrapOptional:
+ visit_field(&node->data.unwrap_optional.expr, visit, context);
+ break;
case NodeTypeUse:
visit_field(&node->data.use.expr, visit, context);
break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index cfabdf11ad..2950b4eb49 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -134,6 +134,7 @@ static const struct ZigKeyword zig_keywords[] = {
{"noalias", TokenIdKeywordNoAlias},
{"null", TokenIdKeywordNull},
{"or", TokenIdKeywordOr},
+ {"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
{"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
@@ -215,7 +216,6 @@ enum TokenizeState {
TokenizeStateSawGreaterThanGreaterThan,
TokenizeStateSawDot,
TokenizeStateSawDotDot,
- TokenizeStateSawQuestionMark,
TokenizeStateSawAtSign,
TokenizeStateCharCode,
TokenizeStateError,
@@ -532,6 +532,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdComma);
end_token(&t);
break;
+ case '?':
+ begin_token(&t, TokenIdQuestion);
+ end_token(&t);
+ break;
case '{':
begin_token(&t, TokenIdLBrace);
end_token(&t);
@@ -624,28 +628,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdDot);
t.state = TokenizeStateSawDot;
break;
- case '?':
- begin_token(&t, TokenIdQuestion);
- t.state = TokenizeStateSawQuestionMark;
- break;
default:
invalid_char_error(&t, c);
}
break;
- case TokenizeStateSawQuestionMark:
- switch (c) {
- case '?':
- set_token_id(&t, t.cur_tok, TokenIdDoubleQuestion);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
- default:
- t.pos -= 1;
- end_token(&t);
- t.state = TokenizeStateStart;
- continue;
- }
- break;
case TokenizeStateSawDot:
switch (c) {
case '.':
@@ -1480,7 +1466,6 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawGreaterThan:
case TokenizeStateSawGreaterThanGreaterThan:
case TokenizeStateSawDot:
- case TokenizeStateSawQuestionMark:
case TokenizeStateSawAtSign:
case TokenizeStateSawStarPercent:
case TokenizeStateSawPlusPercent:
@@ -1545,7 +1530,6 @@ const char * token_name(TokenId id) {
case TokenIdDash: return "-";
case TokenIdDivEq: return "/=";
case TokenIdDot: return ".";
- case TokenIdDoubleQuestion: return "??";
case TokenIdEllipsis2: return "..";
case TokenIdEllipsis3: return "...";
case TokenIdEof: return "EOF";
@@ -1582,6 +1566,7 @@ const char * token_name(TokenId id) {
case TokenIdKeywordNoAlias: return "noalias";
case TokenIdKeywordNull: return "null";
case TokenIdKeywordOr: return "or";
+ case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 7c617f85c6..75c7feb476 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -41,7 +41,6 @@ enum TokenId {
TokenIdDash,
TokenIdDivEq,
TokenIdDot,
- TokenIdDoubleQuestion,
TokenIdEllipsis2,
TokenIdEllipsis3,
TokenIdEof,
@@ -76,6 +75,7 @@ enum TokenId {
TokenIdKeywordNoAlias,
TokenIdKeywordNull,
TokenIdKeywordOr,
+ TokenIdKeywordOrElse,
TokenIdKeywordPacked,
TokenIdKeywordPromise,
TokenIdKeywordPub,
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index aaaf5a1edb..db46d31c5b 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -260,6 +260,12 @@ static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *ch
return node;
}
+static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypeUnwrapOptional);
+ node->data.unwrap_optional.expr = child_node;
+ return node;
+}
+
static AstNode *trans_create_node_bin_op(Context *c, AstNode *lhs_node, BinOpType op, AstNode *rhs_node) {
AstNode *node = trans_create_node(c, NodeTypeBinOpExpr);
node->data.bin_op_expr.op1 = lhs_node;
@@ -382,7 +388,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
fn_def->data.fn_def.fn_proto = fn_proto;
fn_proto->data.fn_proto.fn_def_node = fn_def;
- AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, ref_node);
+ AstNode *unwrap_node = trans_create_node_unwrap_null(c, ref_node);
AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr);
fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node;
@@ -409,10 +415,6 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
return fn_def;
}
-static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) {
- return trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, child);
-}
-
static AstNode *get_global(Context *c, Buf *name) {
{
auto entry = c->global_table.maybe_get(name);
@@ -1963,7 +1965,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType());
if (is_fn_ptr)
return value_node;
- AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, value_node);
+ AstNode *unwrapped = trans_create_node_unwrap_null(c, value_node);
return trans_create_node_ptr_deref(c, unwrapped);
}
case UO_Plus:
@@ -2587,7 +2589,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope *
}
}
if (callee_node == nullptr) {
- callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, callee_raw_node);
+ callee_node = trans_create_node_unwrap_null(c, callee_raw_node);
}
} else {
callee_node = callee_raw_node;
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 142c958173..4f856d9e01 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -33,8 +33,8 @@ pub fn Queue(comptime T: type) type {
pub fn get(self: *Self) ?*Node {
var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
- const node = head.next ?? return null;
- head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
+ const node = head.next orelse return null;
+ head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
}
}
};
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 15611188d2..77fa1a9100 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -28,14 +28,14 @@ pub fn Stack(comptime T: type) type {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
- root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
+ root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break;
}
}
pub fn pop(self: *Self) ?*Node {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
- root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
+ root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root;
}
}
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 0d4f3a6d5e..a82d1b731a 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -19,7 +19,7 @@ pub const BufMap = struct {
pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
self.free(entry.value);
}
@@ -37,12 +37,12 @@ pub const BufMap = struct {
}
pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
- const entry = self.hash_map.get(key) ?? return null;
+ const entry = self.hash_map.get(key) orelse return null;
return entry.value;
}
pub fn delete(self: *BufMap, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
self.free(entry.value);
}
diff --git a/std/buf_set.zig b/std/buf_set.zig
index 03a050ed8b..ab2d8e7c34 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -17,7 +17,7 @@ pub const BufSet = struct {
pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
}
@@ -33,7 +33,7 @@ pub const BufSet = struct {
}
pub fn delete(self: *BufSet, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
}
diff --git a/std/build.zig b/std/build.zig
index fed02e0815..5733aec17d 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -136,7 +136,7 @@ pub const Builder = struct {
}
pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
- self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
+ self.prefix = maybe_prefix orelse "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
@@ -312,9 +312,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-isystem")) {
- const include_path = it.next() ?? {
+ const include_path = it.next() orelse {
warn("Expected argument after -isystem in NIX_CFLAGS_COMPILE\n");
break;
};
@@ -330,9 +330,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_LDFLAGS")) |nix_ldflags| {
var it = mem.split(nix_ldflags, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-rpath")) {
- const rpath = it.next() ?? {
+ const rpath = it.next() orelse {
warn("Expected argument after -rpath in NIX_LDFLAGS\n");
break;
};
@@ -362,7 +362,7 @@ pub const Builder = struct {
}
self.available_options_list.append(available_option) catch unreachable;
- const entry = self.user_input_options.get(name) ?? return null;
+ const entry = self.user_input_options.get(name) orelse return null;
entry.value.used = true;
switch (type_id) {
TypeId.Bool => switch (entry.value.value) {
@@ -416,9 +416,9 @@ pub const Builder = struct {
pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
- const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
- const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") ?? false;
- const release_small = self.option(bool, "release-small", "size optimizations on and safety off") ?? false;
+ const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false;
+ const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false;
+ const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false;
const mode = if (release_safe and !release_fast and !release_small) builtin.Mode.ReleaseSafe else if (release_fast and !release_safe and !release_small) builtin.Mode.ReleaseFast else if (release_small and !release_fast and !release_safe) builtin.Mode.ReleaseSmall else if (!release_fast and !release_safe and !release_small) builtin.Mode.Debug else x: {
warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)");
@@ -518,7 +518,7 @@ pub const Builder = struct {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
if (!entry.value.used) {
warn("Invalid option: -D{}\n\n", entry.key);
self.markInvalidUserInput();
@@ -1246,7 +1246,7 @@ pub const LibExeObjStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
zig_args.append("--library") catch unreachable;
zig_args.append(entry.key) catch unreachable;
}
@@ -1696,7 +1696,7 @@ pub const TestStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
try zig_args.append("--library");
try zig_args.append(entry.key);
}
diff --git a/std/debug/index.zig b/std/debug/index.zig
index be47ab76bc..25f7a58b25 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -208,7 +208,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us
.name = "???",
.address = address,
};
- const symbol = debug_info.symbol_table.search(address) ?? &unknown;
+ const symbol = debug_info.symbol_table.search(address) orelse &unknown;
try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ ptr_hex ++ " in ??? (???)" ++ RESET ++ "\n", symbol.name, address);
},
else => {
@@ -268,10 +268,10 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
try st.elf.openFile(allocator, &st.self_exe_file);
errdefer st.elf.close();
- st.debug_info = (try st.elf.findSection(".debug_info")) ?? return error.MissingDebugInfo;
- st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) ?? return error.MissingDebugInfo;
- st.debug_str = (try st.elf.findSection(".debug_str")) ?? return error.MissingDebugInfo;
- st.debug_line = (try st.elf.findSection(".debug_line")) ?? return error.MissingDebugInfo;
+ st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
+ st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
+ st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
+ st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
st.debug_ranges = (try st.elf.findSection(".debug_ranges"));
try scanAllCompileUnits(st);
return st;
@@ -443,7 +443,7 @@ const Die = struct {
}
fn getAttrAddr(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
else => error.InvalidDebugInfo,
@@ -451,7 +451,7 @@ const Die = struct {
}
fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
FormValue.SecOffset => |value| value,
@@ -460,7 +460,7 @@ const Die = struct {
}
fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
else => error.InvalidDebugInfo,
@@ -468,7 +468,7 @@ const Die = struct {
}
fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
FormValue.StrPtr => |offset| getString(st, offset),
@@ -748,7 +748,7 @@ fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
const abbrev_code = try readULeb128(in_stream);
- const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) ?? return error.InvalidDebugInfo;
+ const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;
var result = Die{
.tag_id = table_entry.tag_id,
diff --git a/std/heap.zig b/std/heap.zig
index d1fbf9ca0a..172bc24118 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -97,12 +97,12 @@ pub const DirectAllocator = struct {
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
- const heap_handle = self.heap_handle ?? blk: {
- const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? return error.OutOfMemory;
+ const heap_handle = self.heap_handle orelse blk: {
+ const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) orelse return error.OutOfMemory;
self.heap_handle = hh;
break :blk hh;
};
- const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory;
+ const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const rem = @rem(root_addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
@@ -142,7 +142,7 @@ pub const DirectAllocator = struct {
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
- const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) ?? blk: {
+ const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
@intToPtr(*align(1) usize, new_record_addr).* = root_addr;
@@ -343,7 +343,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index];
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
}
}
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 536c6d24d0..9e32b7d9da 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -169,7 +169,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the last node in the list.
pub fn pop(list: *Self) ?*Node {
- const last = list.last ?? return null;
+ const last = list.last orelse return null;
list.remove(last);
return last;
}
@@ -179,7 +179,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the first node in the list.
pub fn popFirst(list: *Self) ?*Node {
- const first = list.first ?? return null;
+ const first = list.first orelse return null;
list.remove(first);
return first;
}
diff --git a/std/os/index.zig b/std/os/index.zig
index 807b2c398b..6a13ff94d4 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -425,7 +425,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator:
return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr)));
}
- const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin";
+ const PATH = getEnvPosix("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
// PATH.len because it is >= the largest search_path
// +1 for the / to join the search path and exe_path
// +1 for the null terminating byte
@@ -490,7 +490,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
errdefer result.deinit();
if (is_windows) {
- const ptr = windows.GetEnvironmentStringsA() ?? return error.OutOfMemory;
+ const ptr = windows.GetEnvironmentStringsA() orelse return error.OutOfMemory;
defer assert(windows.FreeEnvironmentStringsA(ptr) != 0);
var i: usize = 0;
@@ -573,7 +573,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 {
return allocator.shrink(u8, buf, result);
}
} else {
- const result = getEnvPosix(key) ?? return error.EnvironmentVariableNotFound;
+ const result = getEnvPosix(key) orelse return error.EnvironmentVariableNotFound;
return mem.dupe(allocator, u8, result);
}
}
@@ -1641,7 +1641,7 @@ pub const ArgIterator = struct {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
- return mem.dupe(allocator, u8, self.inner.next() ?? return null);
+ return mem.dupe(allocator, u8, self.inner.next() orelse return null);
}
}
@@ -2457,9 +2457,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
}
};
- const heap_handle = windows.GetProcessHeap() ?? return SpawnThreadError.OutOfMemory;
+ const heap_handle = windows.GetProcessHeap() orelse return SpawnThreadError.OutOfMemory;
const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
- const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory;
+ const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
@@ -2468,7 +2468,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
outer_context.thread.data.alloc_start = bytes_ptr;
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
- outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? {
+ outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {
const err = windows.GetLastError();
return switch (err) {
else => os.unexpectedErrorWindows(err),
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
index 1414b8185b..cbd0cd1df5 100644
--- a/std/os/linux/vdso.zig
+++ b/std/os/linux/vdso.zig
@@ -28,7 +28,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
}
}
}
- const dynv = maybe_dynv ?? return 0;
+ const dynv = maybe_dynv orelse return 0;
if (base == @maxValue(usize)) return 0;
var maybe_strings: ?[*]u8 = null;
@@ -52,9 +52,9 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
}
}
- const strings = maybe_strings ?? return 0;
- const syms = maybe_syms ?? return 0;
- const hashtab = maybe_hashtab ?? return 0;
+ const strings = maybe_strings orelse return 0;
+ const syms = maybe_syms orelse return 0;
+ const hashtab = maybe_hashtab orelse return 0;
if (maybe_verdef == null) maybe_versym = null;
const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON);
diff --git a/std/os/path.zig b/std/os/path.zig
index 430dda2934..a3ad23b1a9 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -182,8 +182,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
@@ -200,8 +200,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
@@ -923,7 +923,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
var from_it = mem.split(resolved_from, "/\\");
var to_it = mem.split(resolved_to, "/\\");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
@@ -974,7 +974,7 @@ pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![
var from_it = mem.split(resolved_from, "/");
var to_it = mem.split(resolved_to, "/");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
if (mem.eql(u8, from_component, to_component))
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 7170346108..f93a673be0 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -153,7 +153,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
- return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;
+ return windows.LoadLibraryA(padded_buff.ptr) orelse error.DllNotFound;
}
pub fn windowsUnloadDll(hModule: windows.HMODULE) void {
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 3471d6ed21..e4f04df6d0 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -27,15 +27,15 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- const zig_exe = try unwrapArg(arg_it.next(allocator) ?? {
+ const zig_exe = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected first argument to be path to zig compiler\n");
return error.InvalidArgs;
});
- const build_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const build_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected second argument to be build root directory path\n");
return error.InvalidArgs;
});
- const cache_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const cache_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected third argument to be cache root directory path\n");
return error.InvalidArgs;
});
@@ -84,12 +84,12 @@ pub fn main() !void {
} else if (mem.eql(u8, arg, "--help")) {
return usage(&builder, false, try stdout_stream);
} else if (mem.eql(u8, arg, "--prefix")) {
- prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
} else if (mem.eql(u8, arg, "--search-prefix")) {
- const search_prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ const search_prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --search-prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
diff --git a/std/unicode.zig b/std/unicode.zig
index 21ae12f59c..ec808ca4fe 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -220,7 +220,7 @@ const Utf8Iterator = struct {
}
pub fn nextCodepoint(it: *Utf8Iterator) ?u32 {
- const slice = it.nextCodepointSlice() ?? return null;
+ const slice = it.nextCodepointSlice() orelse return null;
switch (slice.len) {
1 => return u32(slice[0]),
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 9f8ef3c3d6..5752f69409 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -43,7 +43,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// skip over line comments at the top of the file
while (true) {
- const next_tok = tok_it.peek() ?? break;
+ const next_tok = tok_it.peek() orelse break;
if (next_tok.id != Token.Id.LineComment) break;
_ = tok_it.next();
}
@@ -197,7 +197,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lib_name_token = nextToken(&tok_it, &tree);
const lib_name_token_index = lib_name_token.index;
const lib_name_token_ptr = lib_name_token.ptr;
- break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) ?? {
+ break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) orelse {
prevToken(&tok_it, &tree);
break :blk null;
};
@@ -1434,13 +1434,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
try stack.append(State{
.ExpectTokenSave = ExpectTokenSave{
.id = Token.Id.AngleBracketRight,
- .ptr = &async_node.rangle_bracket.? },
+ .ptr = &async_node.rangle_bracket.?,
+ },
});
try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } });
continue;
},
State.AsyncEnd => |ctx| {
- const node = ctx.ctx.get() ?? continue;
+ const node = ctx.ctx.get() orelse continue;
switch (node.id) {
ast.Node.Id.FnProto => {
@@ -1813,7 +1814,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
State.RangeExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1835,7 +1836,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.AssignmentExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1865,7 +1866,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.UnwrapExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1900,7 +1901,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BoolOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1924,7 +1925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BoolAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1948,7 +1949,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.ComparisonExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1978,7 +1979,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2002,7 +2003,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryXorExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2026,7 +2027,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2050,7 +2051,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BitShiftExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2080,7 +2081,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.AdditionExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2110,7 +2111,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.MultiplyExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2141,7 +2142,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.CurlySuffixExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (tok_it.peek().?.id == Token.Id.Period) {
const node = try arena.construct(ast.Node.SuffixOp{
@@ -2189,7 +2190,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.TypeExprEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2269,7 +2270,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.SuffixOpExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2418,7 +2419,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.StringLiteral, Token.Id.MultilineStringLiteralLine => {
- opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) ?? unreachable);
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) orelse unreachable);
continue;
},
Token.Id.LParen => {
@@ -2648,7 +2649,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
- opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) ?? {
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) orelse {
prevToken(&tok_it, &tree);
if (opt_ctx != OptionalCtx.Optional) {
((try tree.errors.addOne())).* = Error{ .ExpectedPrimaryExpr = Error.ExpectedPrimaryExpr{ .token = token_index } };
@@ -3348,7 +3349,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok
assert(result.ptr.id != Token.Id.LineComment);
while (true) {
- const next_tok = tok_it.peek() ?? return result;
+ const next_tok = tok_it.peek() orelse return result;
if (next_tok.id != Token.Id.LineComment) return result;
_ = tok_it.next();
}
@@ -3356,7 +3357,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok
fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void {
while (true) {
- const prev_tok = tok_it.prev() ?? return;
+ const prev_tok = tok_it.prev() orelse return;
if (prev_tok.id == Token.Id.LineComment) continue;
return;
}
diff --git a/std/zig/render.zig b/std/zig/render.zig
index 0b8e4d1453..bc45768fa3 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -83,7 +83,7 @@ fn renderRoot(
var start_col: usize = 0;
var it = tree.root_node.decls.iterator(0);
while (true) {
- var decl = (it.next() ?? return).*;
+ var decl = (it.next() orelse return).*;
// look for zig fmt: off comment
var start_token_index = decl.firstToken();
zig_fmt_loop: while (start_token_index != 0) {
@@ -112,7 +112,7 @@ fn renderRoot(
const start = tree.tokens.at(start_token_index + 1).start;
try stream.print("{}\n", tree.source[start..end_token.end]);
while (tree.tokens.at(decl.firstToken()).start < end_token.end) {
- decl = (it.next() ?? return).*;
+ decl = (it.next() orelse return).*;
}
break :zig_fmt_loop;
}
@@ -1993,7 +1993,7 @@ fn renderDocComments(
indent: usize,
start_col: *usize,
) (@typeOf(stream).Child.Error || Error)!void {
- const comment = node.doc_comments ?? return;
+ const comment = node.doc_comments orelse return;
var it = comment.lines.iterator(0);
const first_token = node.firstToken();
while (it.next()) |line_token_index| {
@@ -2021,7 +2021,7 @@ fn nodeIsBlock(base: *const ast.Node) bool {
}
fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
- const infix_op = base.cast(ast.Node.InfixOp) ?? return false;
+ const infix_op = base.cast(ast.Node.InfixOp) orelse return false;
return switch (infix_op.op) {
ast.Node.InfixOp.Op.Period => false,
else => true,
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index a56c470408..ade1cf78aa 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -73,7 +73,7 @@ fn Struct(comptime T: type) type {
fn maybePointer(self: ?*const Self) Self {
const none = Self{ .x = if (T == void) void{} else 0 };
- return (self ?? &none).*;
+ return (self orelse &none).*;
}
};
}
@@ -87,7 +87,7 @@ const Union = union {
fn maybePointer(self: ?*const Union) Union {
const none = Union{ .x = 0 };
- return (self ?? &none).*;
+ return (self orelse &none).*;
}
};
@@ -100,7 +100,7 @@ const Enum = enum {
}
fn maybePointer(self: ?*const Enum) Enum {
- return (self ?? &Enum.None).*;
+ return (self orelse &Enum.None).*;
}
};
diff --git a/test/cases/null.zig b/test/cases/null.zig
index 62565784ac..cdcfd23efb 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -15,13 +15,13 @@ test "optional type" {
const next_x: ?i32 = null;
- const z = next_x ?? 1234;
+ const z = next_x orelse 1234;
assert(z == 1234);
const final_x: ?i32 = 13;
- const num = final_x ?? unreachable;
+ const num = final_x orelse unreachable;
assert(num == 13);
}
@@ -38,7 +38,7 @@ test "test maybe object and get a pointer to the inner value" {
test "rhs maybe unwrap return" {
const x: ?bool = true;
- const y = x ?? return;
+ const y = x orelse return;
}
test "maybe return" {
@@ -53,7 +53,7 @@ fn maybeReturnImpl() void {
}
fn foo(x: ?i32) ?bool {
- const value = x ?? return null;
+ const value = x orelse return null;
return value > 1234;
}
@@ -140,6 +140,6 @@ test "unwrap optional which is field of global var" {
}
test "null with default unwrap" {
- const x: i32 = null ?? 1;
+ const x: i32 = null orelse 1;
assert(x == 1);
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 1c737a59e7..5ec2759032 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2296,7 +2296,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\ defer try canFail();
\\
- \\ const a = maybeInt() ?? return;
+ \\ const a = maybeInt() orelse return;
\\}
\\
\\fn canFail() error!void { }
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 3489f9da21..417171d2c2 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -246,13 +246,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub extern var fn_ptr: ?extern fn() void;
,
\\pub inline fn foo() void {
- \\ return (??fn_ptr)();
+ \\ return fn_ptr.?();
\\}
,
\\pub extern var fn_ptr2: ?extern fn(c_int, f32) u8;
,
\\pub inline fn bar(arg0: c_int, arg1: f32) u8 {
- \\ return (??fn_ptr2)(arg0, arg1);
+ \\ return fn_ptr2.?(arg0, arg1);
\\}
);
@@ -608,7 +608,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ field: c_int,
\\};
\\pub export fn read_field(foo: ?[*]struct_Foo) c_int {
- \\ return (??foo).field;
+ \\ return foo.?.field;
\\}
);
@@ -969,11 +969,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn bar() void {
\\ var f: ?extern fn() void = foo;
\\ var b: ?extern fn() c_int = baz;
- \\ (??f)();
- \\ (??f)();
+ \\ f.?();
+ \\ f.?();
\\ foo();
- \\ _ = (??b)();
- \\ _ = (??b)();
+ \\ _ = b.?();
+ \\ _ = b.?();
\\ _ = baz();
\\}
);
@@ -984,7 +984,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
,
\\pub export fn foo(x: ?[*]c_int) void {
- \\ (??x).* = 1;
+ \\ x.?.* = 1;
\\}
);
@@ -1012,7 +1012,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub fn foo() c_int {
\\ var x: c_int = 1234;
\\ var ptr: ?[*]c_int = &x;
- \\ return (??ptr).*;
+ \\ return ptr.?.*;
\\}
);
@@ -1119,7 +1119,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const glClearPFN = PFNGLCLEARPROC;
,
\\pub inline fn glClearUnion(arg0: GLbitfield) void {
- \\ return (??glProcs.gl.Clear)(arg0);
+ \\ return glProcs.gl.Clear.?(arg0);
\\}
,
\\pub const OpenGLProcs = union_OpenGLProcs;
--
cgit v1.2.3
From 71db8df5480f4d849480267574cd5491066e3868 Mon Sep 17 00:00:00 2001
From: kristopher tate
Date: Thu, 21 Jun 2018 00:40:21 +0900
Subject: std: update stdlib to match updated allocator create signature; ref
#733
---
src-self-hosted/module.zig | 24 +++++++-------
std/atomic/queue.zig | 6 ++--
std/atomic/stack.zig | 6 ++--
std/build.zig | 55 +++++++++++--------------------
std/debug/index.zig | 17 ++++------
std/heap.zig | 2 +-
std/io.zig | 8 ++---
std/linked_list.zig | 6 +++-
std/os/child_process.zig | 9 ++---
std/os/index.zig | 7 ++--
test/tests.zig | 82 +++++++++++++++++++++-------------------------
11 files changed, 100 insertions(+), 122 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 575105f25f..997ef5eed2 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -110,11 +110,12 @@ pub const Module = struct {
parent: ?*CliPkg,
pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
- var pkg = try allocator.create(CliPkg);
- pkg.name = name;
- pkg.path = path;
- pkg.children = ArrayList(*CliPkg).init(allocator);
- pkg.parent = parent;
+ var pkg = try allocator.create(CliPkg{
+ .name = name,
+ .path = path,
+ .children = ArrayList(*CliPkg).init(allocator),
+ .parent = parent
+ });
return pkg;
}
@@ -139,10 +140,7 @@ pub const Module = struct {
const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
- const module_ptr = try allocator.create(Module);
- errdefer allocator.destroy(module_ptr);
-
- module_ptr.* = Module{
+ const module_ptr = try allocator.create(Module{
.allocator = allocator,
.name = name_buffer,
.root_src_path = root_src_path,
@@ -196,7 +194,8 @@ pub const Module = struct {
.test_filters = [][]const u8{},
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
- };
+ });
+ errdefer allocator.destroy(module_ptr);
return module_ptr;
}
@@ -279,13 +278,12 @@ pub const Module = struct {
}
}
- const link_lib = try self.allocator.create(LinkLib);
- link_lib.* = LinkLib{
+ const link_lib = try self.allocator.create(LinkLib{
.name = name,
.path = null,
.provided_explicitly = provided_explicitly,
.symbols = ArrayList([]u8).init(self.allocator),
- };
+ });
try self.link_libs_list.append(link_lib);
if (is_libc) {
self.libc_link_lib = link_lib;
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 3dc64dbea2..5b810f95ac 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -114,8 +114,10 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
- const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
- node.data = x;
+ const node = ctx.allocator.create(Queue(i32).Node{
+ .next = null,
+ .data = x
+ }) catch unreachable;
ctx.queue.put(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 9e81d89257..2272be4a92 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -117,8 +117,10 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
- const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
- node.data = x;
+ const node = ctx.allocator.create(Stack(i32).Node{
+ .next = null,
+ .data = x
+ }) catch unreachable;
ctx.stack.push(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}
diff --git a/std/build.zig b/std/build.zig
index 92454a183a..99de9b5197 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -158,8 +158,7 @@ pub const Builder = struct {
}
pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
- const test_step = self.allocator.create(TestStep) catch unreachable;
- test_step.* = TestStep.init(self, root_src);
+ const test_step = self.allocator.create(TestStep.init(self, root_src)) catch unreachable;
return test_step;
}
@@ -191,21 +190,18 @@ pub const Builder = struct {
}
pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
- const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
- write_file_step.* = WriteFileStep.init(self, file_path, data);
+ const write_file_step = self.allocator.create(WriteFileStep.init(self, file_path, data)) catch unreachable;
return write_file_step;
}
pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
- const log_step = self.allocator.create(LogStep) catch unreachable;
- log_step.* = LogStep.init(self, data);
+ const log_step = self.allocator.create(LogStep.init(self, data)) catch unreachable;
return log_step;
}
pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
- const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
- remove_dir_step.* = RemoveDirStep.init(self, dir_path);
+ const remove_dir_step = self.allocator.create(RemoveDirStep.init(self, dir_path)) catch unreachable;
return remove_dir_step;
}
@@ -404,11 +400,10 @@ pub const Builder = struct {
}
pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
- const step_info = self.allocator.create(TopLevelStep) catch unreachable;
- step_info.* = TopLevelStep{
+ const step_info = self.allocator.create(TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
.description = description,
- };
+ }) catch unreachable;
self.top_level_steps.append(step_info) catch unreachable;
return &step_info.step;
}
@@ -598,8 +593,7 @@ pub const Builder = struct {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
- const install_step = self.allocator.create(InstallFileStep) catch unreachable;
- install_step.* = InstallFileStep.init(self, src_path, full_dest_path);
+ const install_step = self.allocator.create(InstallFileStep.init(self, src_path, full_dest_path)) catch unreachable;
return install_step;
}
@@ -837,51 +831,43 @@ pub const LibExeObjStep = struct {
};
pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, false, ver)) catch unreachable;
return self;
}
pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Lib, version, false);
+ const self = builder.allocator.create(initC(builder, name, Kind.Lib, version, false)) catch unreachable;
return self;
}
pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
+ const self = builder.allocator.create(initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true)) catch unreachable;
return self;
}
pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
+ const self = builder.allocator.create(initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false)) catch unreachable;
self.object_src = src;
return self;
}
pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
+ const self = builder.allocator.create(initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false)) catch unreachable;
return self;
}
@@ -1748,14 +1734,14 @@ pub const CommandStep = struct {
/// ::argv is copied.
pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
- const self = builder.allocator.create(CommandStep) catch unreachable;
- self.* = CommandStep{
+ const self = builder.allocator.create(CommandStep{
.builder = builder,
.step = Step.init(argv[0], builder.allocator, make),
.argv = builder.allocator.alloc([]u8, argv.len) catch unreachable,
.cwd = cwd,
.env_map = env_map,
- };
+ }) catch unreachable;
+
mem.copy([]const u8, self.argv, argv);
self.step.name = self.argv[0];
return self;
@@ -1778,18 +1764,17 @@ const InstallArtifactStep = struct {
const Self = this;
pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
- const self = builder.allocator.create(Self) catch unreachable;
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
LibExeObjStep.Kind.Exe => builder.exe_dir,
LibExeObjStep.Kind.Lib => builder.lib_dir,
};
- self.* = Self{
+ const self = builder.allocator.create(Self{
.builder = builder,
.step = Step.init(builder.fmt("install {}", artifact.step.name), builder.allocator, make),
.artifact = artifact,
.dest_file = os.path.join(builder.allocator, dest_dir, artifact.out_filename) catch unreachable,
- };
+ }) catch unreachable;
self.step.dependOn(&artifact.step);
builder.pushInstalledFile(self.dest_file);
if (self.artifact.kind == LibExeObjStep.Kind.Lib and !self.artifact.static) {
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 198e0f90f6..19cee3c65d 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -249,9 +249,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
- const st = try allocator.create(ElfStackTrace);
- errdefer allocator.destroy(st);
- st.* = ElfStackTrace{
+ const st = try allocator.create(ElfStackTrace{
.self_exe_file = undefined,
.elf = undefined,
.debug_info = undefined,
@@ -261,7 +259,8 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
.debug_ranges = null,
.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
.compile_unit_list = ArrayList(CompileUnit).init(allocator),
- };
+ });
+ errdefer allocator.destroy(st);
st.self_exe_file = try os.openSelfExe();
errdefer st.self_exe_file.close();
@@ -280,11 +279,10 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
var exe_file = try os.openSelfExe();
defer exe_file.close();
- const st = try allocator.create(ElfStackTrace);
+ const st = try allocator.create(ElfStackTrace{
+ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file))
+ });
errdefer allocator.destroy(st);
-
- st.* = ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) };
-
return st;
},
builtin.ObjectFormat.coff => {
@@ -974,8 +972,7 @@ fn scanAllCompileUnits(st: *ElfStackTrace) !void {
try st.self_exe_file.seekTo(compile_unit_pos);
- const compile_unit_die = try st.allocator().create(Die);
- compile_unit_die.* = try parseDie(st, abbrev_table, is_64);
+ const compile_unit_die = try st.allocator().create( try parseDie(st, abbrev_table, is_64) );
if (compile_unit_die.tag_id != DW.TAG_compile_unit) return error.InvalidDebugInfo;
diff --git a/std/heap.zig b/std/heap.zig
index c948818e3d..7fc00cd0a4 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -407,7 +407,7 @@ fn testAllocator(allocator: *mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
- item.* = try allocator.create(i32);
+ item.* = try allocator.create(i32(0));
item.*.* = @intCast(i32, i);
}
diff --git a/std/io.zig b/std/io.zig
index cfe1a7f585..1c468f6f4f 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -414,14 +414,12 @@ pub const BufferedAtomicFile = struct {
pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
- var self = try allocator.create(BufferedAtomicFile);
- errdefer allocator.destroy(self);
-
- self.* = BufferedAtomicFile{
+ var self = try allocator.create(BufferedAtomicFile{
.atomic_file = undefined,
.file_stream = undefined,
.buffered_stream = undefined,
- };
+ });
+ errdefer allocator.destroy(self);
self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.default_file_mode);
errdefer self.atomic_file.deinit();
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 9e32b7d9da..f4f7aab752 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -193,7 +193,11 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
- return allocator.create(Node);
+ return allocator.create(Node{
+ .prev = null,
+ .next = null,
+ .data = undefined
+ });
}
/// Deallocate a node.
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index da5e708555..693129eea8 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -85,10 +85,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
- const child = try allocator.create(ChildProcess);
- errdefer allocator.destroy(child);
-
- child.* = ChildProcess{
+ const child = try allocator.create(ChildProcess{
.allocator = allocator,
.argv = argv,
.pid = undefined,
@@ -109,8 +106,8 @@ pub const ChildProcess = struct {
.stdin_behavior = StdIo.Inherit,
.stdout_behavior = StdIo.Inherit,
.stderr_behavior = StdIo.Inherit,
- };
-
+ });
+ errdefer allocator.destroy(child);
return child;
}
diff --git a/std/os/index.zig b/std/os/index.zig
index dd0d4e2ea1..7b69bd0b36 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -2582,8 +2582,11 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
- const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
- outer_context.inner = context;
+ const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext{
+ .thread = undefined,
+ .inner = context
+ }) catch unreachable;
+
outer_context.thread.data.heap_handle = heap_handle;
outer_context.thread.data.alloc_start = bytes_ptr;
diff --git a/test/tests.zig b/test/tests.zig
index b66441f628..66eb2d93a0 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -48,13 +48,12 @@ const test_targets = []TestTarget{
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
compare_output.addCases(cases);
@@ -62,13 +61,12 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build
}
pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-runtime-safety", "Run the runtime safety tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
runtime_safety.addCases(cases);
@@ -76,13 +74,12 @@ pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build
}
pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompileErrorContext) catch unreachable;
- cases.* = CompileErrorContext{
+ const cases = b.allocator.create(CompileErrorContext{
.b = b,
.step = b.step("test-compile-errors", "Run the compile error tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
compile_errors.addCases(cases);
@@ -90,13 +87,12 @@ pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.
}
pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(BuildExamplesContext) catch unreachable;
- cases.* = BuildExamplesContext{
+ const cases = b.allocator.create(BuildExamplesContext{
.b = b,
.step = b.step("test-build-examples", "Build the examples"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
build_examples.addCases(cases);
@@ -104,13 +100,12 @@ pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.
}
pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
assemble_and_link.addCases(cases);
@@ -118,13 +113,12 @@ pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *bui
}
pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(TranslateCContext) catch unreachable;
- cases.* = TranslateCContext{
+ const cases = b.allocator.create(TranslateCContext{
.b = b,
.step = b.step("test-translate-c", "Run the C transation tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
translate_c.addCases(cases);
@@ -132,13 +126,12 @@ pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.St
}
pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(GenHContext) catch unreachable;
- cases.* = GenHContext{
+ const cases = b.allocator.create(GenHContext{
.b = b,
.step = b.step("test-gen-h", "Run the C header file generation tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
gen_h.addCases(cases);
@@ -240,8 +233,7 @@ pub const CompareOutputContext = struct {
pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(RunCompareOutputStep) catch unreachable;
- ptr.* = RunCompareOutputStep{
+ const ptr = allocator.create(RunCompareOutputStep{
.context = context,
.exe_path = exe_path,
.name = name,
@@ -249,7 +241,7 @@ pub const CompareOutputContext = struct {
.test_index = context.test_index,
.step = build.Step.init("RunCompareOutput", allocator, make),
.cli_args = cli_args,
- };
+ }) catch unreachable;
context.test_index += 1;
return ptr;
}
@@ -328,14 +320,14 @@ pub const CompareOutputContext = struct {
pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
- ptr.* = RuntimeSafetyRunStep{
+ const ptr = allocator.create(RuntimeSafetyRunStep{
.context = context,
.exe_path = exe_path,
.name = name,
.test_index = context.test_index,
.step = build.Step.init("RuntimeSafetyRun", allocator, make),
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -543,15 +535,15 @@ pub const CompileErrorContext = struct {
pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(CompileCmpOutputStep) catch unreachable;
- ptr.* = CompileCmpOutputStep{
+ const ptr = allocator.create(CompileCmpOutputStep{
.step = build.Step.init("CompileCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
.build_mode = build_mode,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -662,14 +654,14 @@ pub const CompileErrorContext = struct {
}
pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_errors = ArrayList([]const u8).init(self.b.allocator),
.link_libc = false,
.is_exe = false,
- };
+ }) catch unreachable;
+
tc.addSourceFile(".tmp_source.zig", source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -829,14 +821,14 @@ pub const TranslateCContext = struct {
pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable;
- ptr.* = TranslateCCmpOutputStep{
+ const ptr = allocator.create(TranslateCCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -936,13 +928,13 @@ pub const TranslateCContext = struct {
}
pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
.allow_warnings = allow_warnings,
- };
+ }) catch unreachable;
+
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -1023,15 +1015,15 @@ pub const GenHContext = struct {
pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
- ptr.* = GenHCmpOutputStep{
+ const ptr = allocator.create(GenHCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.h_path = h_path,
.name = name,
.test_index = context.test_index,
.case = case,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -1070,12 +1062,12 @@ pub const GenHContext = struct {
}
pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
- };
+ }) catch unreachable;
+
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
--
cgit v1.2.3
From 85f928f8bff8c033f6ef0104d68b033669cb36e4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 20 Jun 2018 17:33:29 -0400
Subject: remove std.mem.Allocator.construct and other fixups
---
src-self-hosted/errmsg.zig | 2 +-
src-self-hosted/main.zig | 2 +-
src-self-hosted/module.zig | 13 +++-
std/atomic/queue.zig | 2 +-
std/atomic/stack.zig | 2 +-
std/heap.zig | 3 +-
std/linked_list.zig | 6 +-
std/mem.zig | 8 +--
std/os/index.zig | 13 ++--
std/zig/parse.zig | 150 ++++++++++++++++++++++-----------------------
10 files changed, 101 insertions(+), 100 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index 32d2450aac..b6fd78d8f6 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -35,7 +35,7 @@ pub fn createFromParseError(
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
try parse_error.render(&tree.tokens, out_stream);
- const msg = try allocator.construct(Msg{
+ const msg = try allocator.create(Msg{
.tree = tree,
.path = path,
.text = text_buf.toOwnedSlice(),
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index f7f38130b5..45e6bb742a 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -707,7 +707,7 @@ const Fmt = struct {
// file_path must outlive Fmt
fn addToQueue(self: *Fmt, file_path: []const u8) !void {
- const new_node = try self.seen.allocator.construct(std.LinkedList([]const u8).Node{
+ const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
.prev = undefined,
.next = undefined,
.data = file_path,
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 997ef5eed2..5f02f1a832 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -114,7 +114,7 @@ pub const Module = struct {
.name = name,
.path = path,
.children = ArrayList(*CliPkg).init(allocator),
- .parent = parent
+ .parent = parent,
});
return pkg;
}
@@ -127,7 +127,16 @@ pub const Module = struct {
}
};
- pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module {
+ pub fn create(
+ allocator: *mem.Allocator,
+ name: []const u8,
+ root_src_path: ?[]const u8,
+ target: *const Target,
+ kind: Kind,
+ build_mode: builtin.Mode,
+ zig_lib_dir: []const u8,
+ cache_dir: []const u8,
+ ) !*Module {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 308cd6c736..16dc9f6cc3 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -115,7 +115,7 @@ fn startPuts(ctx: *Context) u8 {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node{
- .next = null,
+ .next = undefined,
.data = x,
}) catch unreachable;
ctx.queue.put(node);
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 011ab3254f..d74bee8e8b 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -118,7 +118,7 @@ fn startPuts(ctx: *Context) u8 {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Stack(i32).Node{
- .next = null,
+ .next = undefined,
.data = x,
}) catch unreachable;
ctx.stack.push(node);
diff --git a/std/heap.zig b/std/heap.zig
index 7fc00cd0a4..41d7802fdd 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -407,8 +407,7 @@ fn testAllocator(allocator: *mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
- item.* = try allocator.create(i32(0));
- item.*.* = @intCast(i32, i);
+ item.* = try allocator.create(@intCast(i32, i));
}
for (slice) |item, i| {
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 128ccdf4b1..62cd5ca2bb 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -193,11 +193,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
- return allocator.create(Node{
- .prev = null,
- .next = null,
- .data = undefined,
- });
+ return allocator.create(Node(undefined));
}
/// Deallocate a node.
diff --git a/std/mem.zig b/std/mem.zig
index 29f5f924c8..ba59faf711 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -41,13 +41,7 @@ pub const Allocator = struct {
return ptr;
}
- /// Alias of `create`
- /// Call `destroy` with the result
- pub fn construct(self: *Allocator, init: var) Error!*@typeOf(init) {
- return self.create(init);
- }
-
- /// `ptr` should be the return value of `construct` or `create`
+ /// `ptr` should be the return value of `create`
pub fn destroy(self: *Allocator, ptr: var) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
diff --git a/std/os/index.zig b/std/os/index.zig
index ce29106810..52b36c351c 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -2468,7 +2468,7 @@ pub const Thread = struct {
data: Data,
pub const use_pthreads = is_posix and builtin.link_libc;
- const Data = if (use_pthreads)
+ pub const Data = if (use_pthreads)
struct {
handle: c.pthread_t,
stack_addr: usize,
@@ -2583,13 +2583,16 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext{
- .thread = undefined,
+ .thread = Thread{
+ .data = Thread.Data{
+ .heap_handle = heap_handle,
+ .alloc_start = bytes_ptr,
+ .handle = undefined,
+ },
+ },
.inner = context,
}) catch unreachable;
- outer_context.thread.data.heap_handle = heap_handle;
- outer_context.thread.data.alloc_start = bytes_ptr;
-
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {
const err = windows.GetLastError();
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 877b81c527..9f0371d4da 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -17,7 +17,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
defer stack.deinit();
const arena = &tree_arena.allocator;
- const root_node = try arena.construct(ast.Node.Root{
+ const root_node = try arena.create(ast.Node.Root{
.base = ast.Node{ .id = ast.Node.Id.Root },
.decls = ast.Node.Root.DeclList.init(arena),
.doc_comments = null,
@@ -65,14 +65,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
Token.Id.Keyword_test => {
stack.append(State.TopLevel) catch unreachable;
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = undefined,
.statements = ast.Node.Block.StatementList.init(arena),
.rbrace = undefined,
});
- const test_node = try arena.construct(ast.Node.TestDecl{
+ const test_node = try arena.create(ast.Node.TestDecl{
.base = ast.Node{ .id = ast.Node.Id.TestDecl },
.doc_comments = comments,
.test_token = token_index,
@@ -109,14 +109,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_comptime => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = undefined,
.statements = ast.Node.Block.StatementList.init(arena),
.rbrace = undefined,
});
- const node = try arena.construct(ast.Node.Comptime{
+ const node = try arena.create(ast.Node.Comptime{
.base = ast.Node{ .id = ast.Node.Id.Comptime },
.comptime_token = token_index,
.expr = &block.base,
@@ -225,7 +225,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
return tree;
}
- const node = try arena.construct(ast.Node.Use{
+ const node = try arena.create(ast.Node.Use{
.base = ast.Node{ .id = ast.Node.Id.Use },
.use_token = token_index,
.visib_token = ctx.visib_token,
@@ -266,7 +266,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_fn, Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc, Token.Id.Keyword_async => {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@@ -298,7 +298,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_async => {
- const async_node = try arena.construct(ast.Node.AsyncAttribute{
+ const async_node = try arena.create(ast.Node.AsyncAttribute{
.base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
.async_token = token_index,
.allocator_type = null,
@@ -330,7 +330,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.TopLevelExternOrField => |ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |identifier| {
- const node = try arena.construct(ast.Node.StructField{
+ const node = try arena.create(ast.Node.StructField{
.base = ast.Node{ .id = ast.Node.Id.StructField },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@@ -375,7 +375,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
- const node = try arena.construct(ast.Node.ContainerDecl{
+ const node = try arena.create(ast.Node.ContainerDecl{
.base = ast.Node{ .id = ast.Node.Id.ContainerDecl },
.layout_token = ctx.layout_token,
.kind_token = switch (token_ptr.id) {
@@ -448,7 +448,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
Token.Id.Identifier => {
switch (tree.tokens.at(container_decl.kind_token).id) {
Token.Id.Keyword_struct => {
- const node = try arena.construct(ast.Node.StructField{
+ const node = try arena.create(ast.Node.StructField{
.base = ast.Node{ .id = ast.Node.Id.StructField },
.doc_comments = comments,
.visib_token = null,
@@ -464,7 +464,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_union => {
- const node = try arena.construct(ast.Node.UnionTag{
+ const node = try arena.create(ast.Node.UnionTag{
.base = ast.Node{ .id = ast.Node.Id.UnionTag },
.name_token = token_index,
.type_expr = null,
@@ -480,7 +480,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_enum => {
- const node = try arena.construct(ast.Node.EnumTag{
+ const node = try arena.create(ast.Node.EnumTag{
.base = ast.Node{ .id = ast.Node.Id.EnumTag },
.name_token = token_index,
.value = null,
@@ -562,7 +562,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.VarDecl => |ctx| {
- const var_decl = try arena.construct(ast.Node.VarDecl{
+ const var_decl = try arena.create(ast.Node.VarDecl{
.base = ast.Node{ .id = ast.Node.Id.VarDecl },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@@ -660,7 +660,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LBrace => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@@ -712,7 +712,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// TODO: this is a special case. Remove this when #760 is fixed
if (token_ptr.id == Token.Id.Keyword_error) {
if (tok_it.peek().?.id == Token.Id.LBrace) {
- const error_type_node = try arena.construct(ast.Node.ErrorType{
+ const error_type_node = try arena.create(ast.Node.ErrorType{
.base = ast.Node{ .id = ast.Node.Id.ErrorType },
.token = token_index,
});
@@ -733,7 +733,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
if (eatToken(&tok_it, &tree, Token.Id.RParen)) |_| {
continue;
}
- const param_decl = try arena.construct(ast.Node.ParamDecl{
+ const param_decl = try arena.create(ast.Node.ParamDecl{
.base = ast.Node{ .id = ast.Node.Id.ParamDecl },
.comptime_token = null,
.noalias_token = null,
@@ -819,7 +819,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LBrace => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = ctx.label,
.lbrace = token_index,
@@ -853,7 +853,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_suspend => {
- const node = try arena.construct(ast.Node.Suspend{
+ const node = try arena.create(ast.Node.Suspend{
.base = ast.Node{ .id = ast.Node.Id.Suspend },
.label = ctx.label,
.suspend_token = token_index,
@@ -925,7 +925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
}
},
State.While => |ctx| {
- const node = try arena.construct(ast.Node.While{
+ const node = try arena.create(ast.Node.While{
.base = ast.Node{ .id = ast.Node.Id.While },
.label = ctx.label,
.inline_token = ctx.inline_token,
@@ -954,7 +954,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
State.For => |ctx| {
- const node = try arena.construct(ast.Node.For{
+ const node = try arena.create(ast.Node.For{
.base = ast.Node{ .id = ast.Node.Id.For },
.label = ctx.label,
.inline_token = ctx.inline_token,
@@ -975,7 +975,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.Else => |dest| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_else)) |else_token| {
- const node = try arena.construct(ast.Node.Else{
+ const node = try arena.create(ast.Node.Else{
.base = ast.Node{ .id = ast.Node.Id.Else },
.else_token = else_token,
.payload = null,
@@ -1038,7 +1038,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_defer, Token.Id.Keyword_errdefer => {
- const node = try arena.construct(ast.Node.Defer{
+ const node = try arena.create(ast.Node.Defer{
.base = ast.Node{ .id = ast.Node.Id.Defer },
.defer_token = token_index,
.kind = switch (token_ptr.id) {
@@ -1056,7 +1056,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBrace => {
- const inner_block = try arena.construct(ast.Node.Block{
+ const inner_block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@@ -1124,7 +1124,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.AsmOutput{
+ const node = try arena.create(ast.Node.AsmOutput{
.base = ast.Node{ .id = ast.Node.Id.AsmOutput },
.lbracket = lbracket_index,
.symbolic_name = undefined,
@@ -1178,7 +1178,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.AsmInput{
+ const node = try arena.create(ast.Node.AsmInput{
.base = ast.Node{ .id = ast.Node.Id.AsmInput },
.lbracket = lbracket_index,
.symbolic_name = undefined,
@@ -1243,7 +1243,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.FieldInitializer{
+ const node = try arena.create(ast.Node.FieldInitializer{
.base = ast.Node{ .id = ast.Node.Id.FieldInitializer },
.period_token = undefined,
.name_token = undefined,
@@ -1332,7 +1332,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
}
const comments = try eatDocComments(arena, &tok_it, &tree);
- const node = try arena.construct(ast.Node.SwitchCase{
+ const node = try arena.create(ast.Node.SwitchCase{
.base = ast.Node{ .id = ast.Node.Id.SwitchCase },
.items = ast.Node.SwitchCase.ItemList.init(arena),
.payload = null,
@@ -1369,7 +1369,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (token_ptr.id == Token.Id.Keyword_else) {
- const else_node = try arena.construct(ast.Node.SwitchElse{
+ const else_node = try arena.create(ast.Node.SwitchElse{
.base = ast.Node{ .id = ast.Node.Id.SwitchElse },
.token = token_index,
});
@@ -1468,7 +1468,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.ExternType => |ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_fn)) |fn_token| {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = ctx.comments,
.visib_token = null,
@@ -1641,7 +1641,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.Payload{
+ const node = try arena.create(ast.Node.Payload{
.base = ast.Node{ .id = ast.Node.Id.Payload },
.lpipe = token_index,
.error_symbol = undefined,
@@ -1677,7 +1677,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.PointerPayload{
+ const node = try arena.create(ast.Node.PointerPayload{
.base = ast.Node{ .id = ast.Node.Id.PointerPayload },
.lpipe = token_index,
.ptr_token = null,
@@ -1720,7 +1720,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.PointerIndexPayload{
+ const node = try arena.create(ast.Node.PointerIndexPayload{
.base = ast.Node{ .id = ast.Node.Id.PointerIndexPayload },
.lpipe = token_index,
.ptr_token = null,
@@ -1754,7 +1754,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.Keyword_return, Token.Id.Keyword_break, Token.Id.Keyword_continue => {
- const node = try arena.construct(ast.Node.ControlFlowExpression{
+ const node = try arena.create(ast.Node.ControlFlowExpression{
.base = ast.Node{ .id = ast.Node.Id.ControlFlowExpression },
.ltoken = token_index,
.kind = undefined,
@@ -1783,7 +1783,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_try, Token.Id.Keyword_cancel, Token.Id.Keyword_resume => {
- const node = try arena.construct(ast.Node.PrefixOp{
+ const node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = switch (token_ptr.id) {
@@ -1817,7 +1817,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = ellipsis3,
@@ -1842,7 +1842,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToAssignment(token_ptr.id)) |ass_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -1872,7 +1872,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToUnwrapExpr(token_ptr.id)) |unwrap_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -1904,7 +1904,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = or_token,
@@ -1928,7 +1928,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = and_token,
@@ -1955,7 +1955,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToComparison(token_ptr.id)) |comp_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -1982,7 +1982,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = pipe,
@@ -2006,7 +2006,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = caret,
@@ -2030,7 +2030,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = ampersand,
@@ -2057,7 +2057,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToBitShift(token_ptr.id)) |bitshift_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2087,7 +2087,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToAddition(token_ptr.id)) |add_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2117,7 +2117,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToMultiply(token_ptr.id)) |mult_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2145,7 +2145,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (tok_it.peek().?.id == Token.Id.Period) {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .StructInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
@@ -2164,7 +2164,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .ArrayInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
@@ -2193,7 +2193,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = bang,
@@ -2212,7 +2212,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToPrefixOp(token_ptr.id)) |prefix_id| {
- var node = try arena.construct(ast.Node.PrefixOp{
+ var node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = prefix_id,
@@ -2222,7 +2222,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// Treat '**' token as two pointer types
if (token_ptr.id == Token.Id.AsteriskAsterisk) {
- const child = try arena.construct(ast.Node.PrefixOp{
+ const child = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = prefix_id,
@@ -2246,7 +2246,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.SuffixOpExpressionBegin => |opt_ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_async)) |async_token| {
- const async_node = try arena.construct(ast.Node.AsyncAttribute{
+ const async_node = try arena.create(ast.Node.AsyncAttribute{
.base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
.async_token = async_token,
.allocator_type = null,
@@ -2277,7 +2277,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LParen => {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{
@@ -2301,7 +2301,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBracket => {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .ArrayAccess = undefined },
@@ -2316,7 +2316,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
Token.Id.Period => {
if (eatToken(&tok_it, &tree, Token.Id.Asterisk)) |asterisk_token| {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op.Deref,
@@ -2327,7 +2327,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op.UnwrapOptional,
@@ -2337,7 +2337,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
continue;
}
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2397,7 +2397,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_promise => {
- const node = try arena.construct(ast.Node.PromiseType{
+ const node = try arena.create(ast.Node.PromiseType{
.base = ast.Node{ .id = ast.Node.Id.PromiseType },
.promise_token = token.index,
.result = null,
@@ -2423,7 +2423,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LParen => {
- const node = try arena.construct(ast.Node.GroupedExpression{
+ const node = try arena.create(ast.Node.GroupedExpression{
.base = ast.Node{ .id = ast.Node.Id.GroupedExpression },
.lparen = token.index,
.expr = undefined,
@@ -2441,7 +2441,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Builtin => {
- const node = try arena.construct(ast.Node.BuiltinCall{
+ const node = try arena.create(ast.Node.BuiltinCall{
.base = ast.Node{ .id = ast.Node.Id.BuiltinCall },
.builtin_token = token.index,
.params = ast.Node.BuiltinCall.ParamList.init(arena),
@@ -2460,7 +2460,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBracket => {
- const node = try arena.construct(ast.Node.PrefixOp{
+ const node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token.index,
.op = undefined,
@@ -2519,7 +2519,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_fn => {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = null,
.visib_token = null,
@@ -2540,7 +2540,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = null,
.visib_token = null,
@@ -2567,7 +2567,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_asm => {
- const node = try arena.construct(ast.Node.Asm{
+ const node = try arena.create(ast.Node.Asm{
.base = ast.Node{ .id = ast.Node.Id.Asm },
.asm_token = token.index,
.volatile_token = null,
@@ -2629,7 +2629,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.ErrorSetDecl{
+ const node = try arena.create(ast.Node.ErrorSetDecl{
.base = ast.Node{ .id = ast.Node.Id.ErrorSetDecl },
.error_token = ctx.error_token,
.decls = ast.Node.ErrorSetDecl.DeclList.init(arena),
@@ -2695,7 +2695,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
return tree;
}
- const node = try arena.construct(ast.Node.ErrorTag{
+ const node = try arena.create(ast.Node.ErrorTag{
.base = ast.Node{ .id = ast.Node.Id.ErrorTag },
.doc_comments = comments,
.name_token = ident_token_index,
@@ -3032,7 +3032,7 @@ fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*as
if (result.*) |comment_node| {
break :blk comment_node;
} else {
- const comment_node = try arena.construct(ast.Node.DocComment{
+ const comment_node = try arena.create(ast.Node.DocComment{
.base = ast.Node{ .id = ast.Node.Id.DocComment },
.lines = ast.Node.DocComment.LineList.init(arena),
});
@@ -3061,7 +3061,7 @@ fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterato
return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base;
},
Token.Id.MultilineStringLiteralLine => {
- const node = try arena.construct(ast.Node.MultilineStringLiteral{
+ const node = try arena.create(ast.Node.MultilineStringLiteral{
.base = ast.Node{ .id = ast.Node.Id.MultilineStringLiteral },
.lines = ast.Node.MultilineStringLiteral.LineList.init(arena),
});
@@ -3089,7 +3089,7 @@ fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterato
fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool {
switch (token_ptr.id) {
Token.Id.Keyword_suspend => {
- const node = try arena.construct(ast.Node.Suspend{
+ const node = try arena.create(ast.Node.Suspend{
.base = ast.Node{ .id = ast.Node.Id.Suspend },
.label = null,
.suspend_token = token_index,
@@ -3103,7 +3103,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_if => {
- const node = try arena.construct(ast.Node.If{
+ const node = try arena.create(ast.Node.If{
.base = ast.Node{ .id = ast.Node.Id.If },
.if_token = token_index,
.condition = undefined,
@@ -3144,7 +3144,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_switch => {
- const node = try arena.construct(ast.Node.Switch{
+ const node = try arena.create(ast.Node.Switch{
.base = ast.Node{ .id = ast.Node.Id.Switch },
.switch_token = token_index,
.expr = undefined,
@@ -3166,7 +3166,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_comptime => {
- const node = try arena.construct(ast.Node.Comptime{
+ const node = try arena.create(ast.Node.Comptime{
.base = ast.Node{ .id = ast.Node.Id.Comptime },
.comptime_token = token_index,
.expr = undefined,
@@ -3178,7 +3178,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.LBrace => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@@ -3318,7 +3318,7 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
}
fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T {
- return arena.construct(T{
+ return arena.create(T{
.base = ast.Node{ .id = ast.Node.typeToId(T) },
.token = token_index,
});
--
cgit v1.2.3
From 8866bef92c8b674c2a444c94326c57984597ab05 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 22 Jun 2018 01:49:32 -0400
Subject: clean up self hosted main. delete unsupported commands
---
src-self-hosted/arg.zig | 4 +-
src-self-hosted/main.zig | 546 ++++++++++-----------------------------------
src-self-hosted/module.zig | 24 --
test/cases/bugs/1111.zig | 6 +-
4 files changed, 121 insertions(+), 459 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index dc89483213..2ab44e5fdf 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -168,7 +168,7 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
- pub fn many(self: *Args, name: []const u8) ?[]const []const u8 {
+ pub fn many(self: *Args, name: []const u8) []const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Many => |inner| {
@@ -177,7 +177,7 @@ pub const Args = struct {
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
- return null;
+ return []const []const u8{};
}
}
};
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 45e6bb742a..6dabddaefb 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -26,15 +26,11 @@ const usage =
\\
\\Commands:
\\
- \\ build Build project from build.zig
\\ build-exe [source] Create executable from source or object files
\\ build-lib [source] Create library from source or object files
\\ build-obj [source] Create object from source or assembly
\\ fmt [source] Parse file and render in canonical zig format
- \\ run [source] Create executable and run immediately
\\ targets List available compilation targets
- \\ test [source] Create and run a test build
- \\ translate-c [source] Convert c code to zig code
\\ version Print version number and exit
\\ zen Print zen of zig and exit
\\
@@ -47,7 +43,7 @@ const Command = struct {
};
pub fn main() !void {
- var allocator = std.heap.c_allocator;
+ const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
@@ -58,18 +54,16 @@ pub fn main() !void {
stderr = &stderr_out_stream.stream;
const args = try os.argsAlloc(allocator);
- defer os.argsFree(allocator, args);
+ // TODO I'm getting unreachable code here, which shouldn't happen
+ //defer os.argsFree(allocator, args);
if (args.len <= 1) {
+ try stderr.write("expected command argument\n\n");
try stderr.write(usage);
os.exit(1);
}
const commands = []Command{
- Command{
- .name = "build",
- .exec = cmdBuild,
- },
Command{
.name = "build-exe",
.exec = cmdBuildExe,
@@ -86,22 +80,10 @@ pub fn main() !void {
.name = "fmt",
.exec = cmdFmt,
},
- Command{
- .name = "run",
- .exec = cmdRun,
- },
Command{
.name = "targets",
.exec = cmdTargets,
},
- Command{
- .name = "test",
- .exec = cmdTest,
- },
- Command{
- .name = "translate-c",
- .exec = cmdTranslateC,
- },
Command{
.name = "version",
.exec = cmdVersion,
@@ -124,177 +106,15 @@ pub fn main() !void {
for (commands) |command| {
if (mem.eql(u8, command.name, args[1])) {
- try command.exec(allocator, args[2..]);
- return;
+ return command.exec(allocator, args[2..]);
}
}
try stderr.print("unknown command: {}\n\n", args[1]);
try stderr.write(usage);
+ os.exit(1);
}
-// cmd:build ///////////////////////////////////////////////////////////////////////////////////////
-
-const usage_build =
- \\usage: zig build
- \\
- \\General Options:
- \\ --help Print this help and exit
- \\ --init Generate a build.zig template
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose Print commands before executing them
- \\ --prefix [path] Override default install prefix
- \\
- \\Project-Specific Options:
- \\
- \\ Project-specific options become available when the build file is found.
- \\
- \\Advanced Options:
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose-tokenize Enable compiler debug output for tokenization
- \\ --verbose-ast Enable compiler debug output for parsing into an AST
- \\ --verbose-link Enable compiler debug output for linking
- \\ --verbose-ir Enable compiler debug output for Zig IR
- \\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
- \\ --verbose-cimport Enable compiler debug output for C imports
- \\
- \\
-;
-
-const args_build_spec = []Flag{
- Flag.Bool("--help"),
- Flag.Bool("--init"),
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose"),
- Flag.Arg1("--prefix"),
-
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose-tokenize"),
- Flag.Bool("--verbose-ast"),
- Flag.Bool("--verbose-link"),
- Flag.Bool("--verbose-ir"),
- Flag.Bool("--verbose-llvm-ir"),
- Flag.Bool("--verbose-cimport"),
-};
-
-const missing_build_file =
- \\No 'build.zig' file found.
- \\
- \\Initialize a 'build.zig' template file with `zig build --init`,
- \\or build an executable directly with `zig build-exe $FILENAME.zig`.
- \\
- \\See: `zig build --help` or `zig help` for more options.
- \\
-;
-
-fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_build);
- os.exit(0);
- }
-
- const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
- defer allocator.free(zig_lib_dir);
-
- const zig_std_dir = try os.path.join(allocator, zig_lib_dir, "std");
- defer allocator.free(zig_std_dir);
-
- const special_dir = try os.path.join(allocator, zig_std_dir, "special");
- defer allocator.free(special_dir);
-
- const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
- defer allocator.free(build_runner_path);
-
- const build_file = flags.single("build-file") orelse "build.zig";
- const build_file_abs = try os.path.resolve(allocator, ".", build_file);
- defer allocator.free(build_file_abs);
-
- const build_file_exists = os.File.access(allocator, build_file_abs, os.default_file_mode) catch false;
-
- if (flags.present("init")) {
- if (build_file_exists) {
- try stderr.print("build.zig already exists\n");
- os.exit(1);
- }
-
- // need a new scope for proper defer scope finalization on exit
- {
- const build_template_path = try os.path.join(allocator, special_dir, "build_file_template.zig");
- defer allocator.free(build_template_path);
-
- try os.copyFile(allocator, build_template_path, build_file_abs);
- try stderr.print("wrote build.zig template\n");
- }
-
- os.exit(0);
- }
-
- if (!build_file_exists) {
- try stderr.write(missing_build_file);
- os.exit(1);
- }
-
- // TODO: Invoke build.zig entrypoint directly?
- var zig_exe_path = try os.selfExePath(allocator);
- defer allocator.free(zig_exe_path);
-
- var build_args = ArrayList([]const u8).init(allocator);
- defer build_args.deinit();
-
- const build_file_basename = os.path.basename(build_file_abs);
- const build_file_dirname = os.path.dirname(build_file_abs) orelse ".";
-
- var full_cache_dir: []u8 = undefined;
- if (flags.single("cache-dir")) |cache_dir| {
- full_cache_dir = try os.path.resolve(allocator, ".", cache_dir, full_cache_dir);
- } else {
- full_cache_dir = try os.path.join(allocator, build_file_dirname, "zig-cache");
- }
- defer allocator.free(full_cache_dir);
-
- const path_to_build_exe = try os.path.join(allocator, full_cache_dir, "build");
- defer allocator.free(path_to_build_exe);
-
- try build_args.append(path_to_build_exe);
- try build_args.append(zig_exe_path);
- try build_args.append(build_file_dirname);
- try build_args.append(full_cache_dir);
-
- var proc = try os.ChildProcess.init(build_args.toSliceConst(), allocator);
- defer proc.deinit();
-
- var term = try proc.spawnAndWait();
- switch (term) {
- os.ChildProcess.Term.Exited => |status| {
- if (status != 0) {
- try stderr.print("{} exited with status {}\n", build_args.at(0), status);
- os.exit(1);
- }
- },
- os.ChildProcess.Term.Signal => |signal| {
- try stderr.print("{} killed by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Stopped => |signal| {
- try stderr.print("{} stopped by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Unknown => |status| {
- try stderr.print("{} encountered unknown failure {}\n", build_args.at(0), status);
- os.exit(1);
- },
- }
-}
-
-// cmd:build-exe ///////////////////////////////////////////////////////////////////////////////////
-
const usage_build_generic =
\\usage: zig build-exe [file]
\\ zig build-lib [file]
@@ -315,8 +135,11 @@ const usage_build_generic =
\\ --output-h [file] Override generated header file path
\\ --pkg-begin [name] [path] Make package available to import and push current pkg
\\ --pkg-end Pop current pkg
- \\ --release-fast Build with optimizations on and safety off
- \\ --release-safe Build with optimizations on and safety on
+ \\ --mode [mode] Set the build mode
+ \\ debug (default) optimizations off, safety on
+ \\ release-fast optimizations on, safety off
+ \\ release-safe optimizations on, safety on
+ \\ release-small optimize for small binary, safety off
\\ --static Output will be statically linked
\\ --strip Exclude debug symbols
\\ --target-arch [name] Specify target architecture
@@ -367,6 +190,12 @@ const args_build_generic = []Flag{
"off",
"on",
}),
+ Flag.Option("--mode", []const []const u8{
+ "debug",
+ "release-fast",
+ "release-safe",
+ "release-small",
+ }),
Flag.ArgMergeN("--assembly", 1),
Flag.Arg1("--cache-dir"),
@@ -383,8 +212,6 @@ const args_build_generic = []Flag{
// NOTE: Parsed manually after initial check
Flag.ArgN("--pkg-begin", 2),
Flag.Bool("--pkg-end"),
- Flag.Bool("--release-fast"),
- Flag.Bool("--release-safe"),
Flag.Bool("--static"),
Flag.Bool("--strip"),
Flag.Arg1("--target-arch"),
@@ -431,16 +258,25 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_build_generic);
+ try stdout.write(usage_build_generic);
os.exit(0);
}
- var build_mode = builtin.Mode.Debug;
- if (flags.present("release-fast")) {
- build_mode = builtin.Mode.ReleaseFast;
- } else if (flags.present("release-safe")) {
- build_mode = builtin.Mode.ReleaseSafe;
- }
+ const build_mode = blk: {
+ if (flags.single("mode")) |mode_flag| {
+ if (mem.eql(u8, mode_flag, "debug")) {
+ break :blk builtin.Mode.Debug;
+ } else if (mem.eql(u8, mode_flag, "release-fast")) {
+ break :blk builtin.Mode.ReleaseFast;
+ } else if (mem.eql(u8, mode_flag, "release-safe")) {
+ break :blk builtin.Mode.ReleaseSafe;
+ } else if (mem.eql(u8, mode_flag, "release-small")) {
+ break :blk builtin.Mode.ReleaseSmall;
+ } else unreachable;
+ } else {
+ break :blk builtin.Mode.Debug;
+ }
+ };
const color = blk: {
if (flags.single("color")) |color_flag| {
@@ -456,20 +292,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
};
- var emit_type = Module.Emit.Binary;
- if (flags.single("emit")) |emit_flag| {
- if (mem.eql(u8, emit_flag, "asm")) {
- emit_type = Module.Emit.Assembly;
- } else if (mem.eql(u8, emit_flag, "bin")) {
- emit_type = Module.Emit.Binary;
- } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
- emit_type = Module.Emit.LlvmIr;
+ const emit_type = blk: {
+ if (flags.single("emit")) |emit_flag| {
+ if (mem.eql(u8, emit_flag, "asm")) {
+ break :blk Module.Emit.Assembly;
+ } else if (mem.eql(u8, emit_flag, "bin")) {
+ break :blk Module.Emit.Binary;
+ } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
+ break :blk Module.Emit.LlvmIr;
+ } else unreachable;
} else {
- unreachable;
+ break :blk Module.Emit.Binary;
}
- }
+ };
- var cur_pkg = try Module.CliPkg.init(allocator, "", "", null); // TODO: Need a path, name?
+ var cur_pkg = try CliPkg.init(allocator, "", "", null);
defer cur_pkg.deinit();
var i: usize = 0;
@@ -482,15 +319,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
i += 1;
const new_pkg_path = args[i];
- var new_cur_pkg = try Module.CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
+ var new_cur_pkg = try CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
try cur_pkg.children.append(new_cur_pkg);
cur_pkg = new_cur_pkg;
} else if (mem.eql(u8, "--pkg-end", arg_name)) {
- if (cur_pkg.parent == null) {
+ if (cur_pkg.parent) |parent| {
+ cur_pkg = parent;
+ } else {
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
- cur_pkg = cur_pkg.parent.?;
}
}
@@ -499,43 +337,42 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
- var in_file: ?[]const u8 = undefined;
- switch (flags.positionals.len) {
- 0 => {
- try stderr.write("--name [name] not provided and unable to infer\n");
- os.exit(1);
- },
- 1 => {
- in_file = flags.positionals.at(0);
- },
+ const provided_name = flags.single("name");
+ const root_source_file = switch (flags.positionals.len) {
+ 0 => null,
+ 1 => flags.positionals.at(0),
else => {
- try stderr.write("only one zig input file is accepted during build\n");
+ try stderr.print("unexpected extra parameter: {}\n", flags.positionals.at(1));
os.exit(1);
},
- }
+ };
- const basename = os.path.basename(in_file.?);
- var it = mem.split(basename, ".");
- const root_name = it.next() orelse {
- try stderr.write("file name cannot be empty\n");
- os.exit(1);
+ const root_name = if (provided_name) |n| n else blk: {
+ if (root_source_file) |file| {
+ const basename = os.path.basename(file);
+ var it = mem.split(basename, ".");
+ break :blk it.next() orelse basename;
+ } else {
+ try stderr.write("--name [name] not provided and unable to infer\n");
+ os.exit(1);
+ }
};
- const asm_a = flags.many("assembly");
- const obj_a = flags.many("object");
- if (in_file == null and (obj_a == null or obj_a.?.len == 0) and (asm_a == null or asm_a.?.len == 0)) {
+ const assembly_files = flags.many("assembly");
+ const link_objects = flags.many("object");
+ if (root_source_file == null and link_objects.len == 0 and assembly_files.len == 0) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
- if (out_type == Module.Kind.Obj and (obj_a != null and obj_a.?.len != 0)) {
+ if (out_type == Module.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
- const zig_root_source_file = in_file;
-
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch {
+ const rel_cache_dir = flags.single("cache-dir") orelse "zig-cache"[0..];
+ const full_cache_dir = os.path.resolve(allocator, ".", rel_cache_dir) catch {
+ try stderr.print("invalid cache dir: {}\n", rel_cache_dir);
os.exit(1);
};
defer allocator.free(full_cache_dir);
@@ -546,7 +383,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
var module = try Module.create(
allocator,
root_name,
- zig_root_source_file,
+ root_source_file,
Target.Native,
out_type,
build_mode,
@@ -561,24 +398,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.is_test = false;
- if (flags.single("linker-script")) |linker_script| {
- module.linker_script = linker_script;
- }
-
+ module.linker_script = flags.single("linker-script");
module.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
- if (flags.many("mllvm")) |mllvm_flags| {
- for (mllvm_flags) |mllvm| {
- try clang_argv_buf.append("-mllvm");
- try clang_argv_buf.append(mllvm);
- }
- module.llvm_argv = mllvm_flags;
- module.clang_argv = clang_argv_buf.toSliceConst();
+ const mllvm_flags = flags.many("mllvm");
+ for (mllvm_flags) |mllvm| {
+ try clang_argv_buf.append("-mllvm");
+ try clang_argv_buf.append(mllvm);
}
+ module.llvm_argv = mllvm_flags;
+ module.clang_argv = clang_argv_buf.toSliceConst();
+
module.strip = flags.present("strip");
module.is_static = flags.present("static");
@@ -610,18 +444,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.verbose_cimport = flags.present("verbose-cimport");
module.err_color = color;
-
- if (flags.many("library-path")) |lib_dirs| {
- module.lib_dirs = lib_dirs;
- }
-
- if (flags.many("framework")) |frameworks| {
- module.darwin_frameworks = frameworks;
- }
-
- if (flags.many("rpath")) |rpath_list| {
- module.rpath_list = rpath_list;
- }
+ module.lib_dirs = flags.many("library-path");
+ module.darwin_frameworks = flags.many("framework");
+ module.rpath_list = flags.many("rpath");
if (flags.single("output-h")) |output_h| {
module.out_h_path = output_h;
@@ -644,41 +469,25 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
module.emit_file_type = emit_type;
- if (flags.many("object")) |objects| {
- module.link_objects = objects;
- }
- if (flags.many("assembly")) |assembly_files| {
- module.assembly_files = assembly_files;
- }
+ module.link_objects = link_objects;
+ module.assembly_files = assembly_files;
try module.build();
- try module.link(flags.single("out-file") orelse null);
-
- if (flags.present("print-timing-info")) {
- // codegen_print_timing_info(g, stderr);
- }
-
- try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
+ try module.link(flags.single("out-file"));
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Exe);
+ return buildOutputType(allocator, args, Module.Kind.Exe);
}
-// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
-
fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Lib);
+ return buildOutputType(allocator, args, Module.Kind.Lib);
}
-// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
-
fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Obj);
+ return buildOutputType(allocator, args, Module.Kind.Obj);
}
-// cmd:fmt /////////////////////////////////////////////////////////////////////////////////////////
-
const usage_fmt =
\\usage: zig fmt [file]...
\\
@@ -735,7 +544,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_fmt);
+ try stdout.write(usage_fmt);
os.exit(0);
}
@@ -863,162 +672,16 @@ fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
}
}
-// cmd:version /////////////////////////////////////////////////////////////////////////////////////
-
fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
-// cmd:test ////////////////////////////////////////////////////////////////////////////////////////
-
-const usage_test =
- \\usage: zig test [file]...
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
-;
-
const args_test_spec = []Flag{Flag.Bool("--help")};
-fn cmdTest(allocator: *Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_test);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- // compile the test program into the cache and run
-
- // NOTE: May be overlap with buildOutput, take the shared part out.
- try stderr.print("testing file {}\n", flags.positionals.at(0));
-}
-
-// cmd:run /////////////////////////////////////////////////////////////////////////////////////////
-
-// Run should be simple and not expose the full set of arguments provided by build-exe. If specific
-// build requirements are need, the user should `build-exe` then `run` manually.
-const usage_run =
- \\usage: zig run [file] --
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
-;
-
-const args_run_spec = []Flag{Flag.Bool("--help")};
-
-fn cmdRun(allocator: *Allocator, args: []const []const u8) !void {
- var compile_args = args;
- var runtime_args: []const []const u8 = []const []const u8{};
-
- for (args) |argv, i| {
- if (mem.eql(u8, argv, "--")) {
- compile_args = args[0..i];
- runtime_args = args[i + 1 ..];
- break;
- }
- }
- var flags = try Args.parse(allocator, args_run_spec, compile_args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_run);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- try stderr.print("runtime args:\n");
- for (runtime_args) |cargs| {
- try stderr.print("{}\n", cargs);
- }
-}
-
-// cmd:translate-c /////////////////////////////////////////////////////////////////////////////////
-
-const usage_translate_c =
- \\usage: zig translate-c [file]
- \\
- \\Options:
- \\ --help Print this help and exit
- \\ --enable-timing-info Print timing diagnostics
- \\ --output [path] Output file to write generated zig file (default: stdout)
- \\
- \\
-;
-
-const args_translate_c_spec = []Flag{
- Flag.Bool("--help"),
- Flag.Bool("--enable-timing-info"),
- Flag.Arg1("--libc-include-dir"),
- Flag.Arg1("--output"),
-};
-
-fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_translate_c_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_translate_c);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one c source file\n");
- os.exit(1);
- }
-
- // set up codegen
-
- const zig_root_source_file = null;
-
- // NOTE: translate-c shouldn't require setting up the full codegen instance as it does in
- // the C++ compiler.
-
- // codegen_create(g);
- // codegen_set_out_name(g, null);
- // codegen_translate_c(g, flags.positional.at(0))
-
- var output_stream = stdout;
- if (flags.single("output")) |output_file| {
- var file = try os.File.openWrite(allocator, output_file);
- defer file.close();
-
- var file_stream = io.FileOutStream.init(&file);
- // TODO: Not being set correctly, still stdout
- output_stream = &file_stream.stream;
- }
-
- // ast_render(g, output_stream, g->root_import->root, 4);
- try output_stream.write("pub const example = 10;\n");
-
- if (flags.present("enable-timing-info")) {
- // codegen_print_timing_info(g, stdout);
- try stderr.write("printing timing info for translate-c\n");
- }
-}
-
-// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
-
fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
- try stderr.write(usage);
+ try stdout.write(usage);
}
-// cmd:zen /////////////////////////////////////////////////////////////////////////////////////////
-
const info_zen =
\\
\\ * Communicate intent precisely.
@@ -1040,8 +703,6 @@ fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
-// cmd:internal ////////////////////////////////////////////////////////////////////////////////////
-
const usage_internal =
\\usage: zig internal [subcommand]
\\
@@ -1095,3 +756,28 @@ fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
);
}
+
+const CliPkg = struct {
+ name: []const u8,
+ path: []const u8,
+ children: ArrayList(*CliPkg),
+ parent: ?*CliPkg,
+
+ pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
+ var pkg = try allocator.create(CliPkg{
+ .name = name,
+ .path = path,
+ .children = ArrayList(*CliPkg).init(allocator),
+ .parent = parent,
+ });
+ return pkg;
+ }
+
+ pub fn deinit(self: *CliPkg) void {
+ for (self.children.toSliceConst()) |child| {
+ child.deinit();
+ }
+ self.children.deinit();
+ }
+};
+
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 5f02f1a832..4da46cd38c 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -103,30 +103,6 @@ pub const Module = struct {
LlvmIr,
};
- pub const CliPkg = struct {
- name: []const u8,
- path: []const u8,
- children: ArrayList(*CliPkg),
- parent: ?*CliPkg,
-
- pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
- var pkg = try allocator.create(CliPkg{
- .name = name,
- .path = path,
- .children = ArrayList(*CliPkg).init(allocator),
- .parent = parent,
- });
- return pkg;
- }
-
- pub fn deinit(self: *CliPkg) void {
- for (self.children.toSliceConst()) |child| {
- child.deinit();
- }
- self.children.deinit();
- }
- };
-
pub fn create(
allocator: *mem.Allocator,
name: []const u8,
diff --git a/test/cases/bugs/1111.zig b/test/cases/bugs/1111.zig
index 51ce90af52..f62107f9a3 100644
--- a/test/cases/bugs/1111.zig
+++ b/test/cases/bugs/1111.zig
@@ -5,8 +5,8 @@ const Foo = extern enum {
test "issue 1111 fixed" {
const v = Foo.Bar;
- switch(v) {
- Foo.Bar => return,
- else => return,
+ switch (v) {
+ Foo.Bar => return,
+ else => return,
}
}
--
cgit v1.2.3
From a3f55aaf34f0a459c8aec4b35e55ad4534eaca30 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 29 Jun 2018 15:39:55 -0400
Subject: add event loop Channel abstraction
This is akin to channels in Go, except:
* implemented in userland
* they are lock-free and thread-safe
* they integrate with the userland event loop
The self hosted compiler is changed to use a channel for events,
and made to stay alive, watching files and performing builds when
things change, however the main.zig file exits after 1 build.
Note that nothing is actually built yet, it just parses the input
and then declares that the build succeeded.
Next items to do:
* add windows and macos support for std.event.Loop
* improve the event loop stop() operation
* make the event loop multiplex coroutines onto kernel threads
* watch source file for updates, and provide AST diffs
(at least list the top level declaration changes)
* top level declaration analysis
---
src-self-hosted/main.zig | 37 +++++-
src-self-hosted/module.zig | 135 ++++++++++++++++------
std/atomic/queue_mpsc.zig | 2 +-
std/event.zig | 279 ++++++++++++++++++++++++++++++++++++++++++++-
std/fmt/index.zig | 3 +
std/heap.zig | 1 +
6 files changed, 416 insertions(+), 41 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 6dabddaefb..d17fc94c82 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
+const event = std.event;
const os = std.os;
const io = std.io;
const mem = std.mem;
@@ -43,6 +44,9 @@ const Command = struct {
};
pub fn main() !void {
+ // This allocator needs to be thread-safe because we use it for the event.Loop
+ // which multiplexes coroutines onto kernel threads.
+ // libc allocator is guaranteed to have this property.
const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
@@ -380,8 +384,10 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
+ var loop = try event.Loop.init(allocator);
+
var module = try Module.create(
- allocator,
+ &loop,
root_name,
root_source_file,
Target.Native,
@@ -471,9 +477,35 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.emit_file_type = emit_type;
module.link_objects = link_objects;
module.assembly_files = assembly_files;
+ module.link_out_file = flags.single("out-file");
try module.build();
- try module.link(flags.single("out-file"));
+ const process_build_events_handle = try async processBuildEvents(module, true);
+ defer cancel process_build_events_handle;
+ loop.run();
+}
+
+async fn processBuildEvents(module: *Module, watch: bool) void {
+ while (watch) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_event = await (async module.events.get() catch unreachable);
+
+ switch (build_event) {
+ Module.Event.Ok => {
+ std.debug.warn("Build succeeded\n");
+ // for now we stop after 1
+ module.loop.stop();
+ return;
+ },
+ Module.Event.Error => |err| {
+ std.debug.warn("build failed: {}\n", @errorName(err));
+ @panic("TODO error return trace");
+ },
+ Module.Event.Fail => |errs| {
+ @panic("TODO print compile error messages");
+ },
+ }
+ }
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
@@ -780,4 +812,3 @@ const CliPkg = struct {
self.children.deinit();
}
};
-
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 4da46cd38c..4fac760790 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -11,9 +11,11 @@ const warn = std.debug.warn;
const Token = std.zig.Token;
const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
+const ast = std.zig.ast;
+const event = std.event;
pub const Module = struct {
- allocator: *mem.Allocator,
+ loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
@@ -76,6 +78,50 @@ pub const Module = struct {
kind: Kind,
+ link_out_file: ?[]const u8,
+ events: *event.Channel(Event),
+
+ // TODO handle some of these earlier and report them in a way other than error codes
+ pub const BuildError = error{
+ OutOfMemory,
+ EndOfStream,
+ BadFd,
+ Io,
+ IsDir,
+ Unexpected,
+ SystemResources,
+ SharingViolation,
+ PathAlreadyExists,
+ FileNotFound,
+ AccessDenied,
+ PipeBusy,
+ FileTooBig,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ PathNotFound,
+ NoSpaceLeft,
+ NotDir,
+ FileSystem,
+ OperationAborted,
+ IoPending,
+ BrokenPipe,
+ WouldBlock,
+ FileClosed,
+ DestinationAddressRequired,
+ DiskQuota,
+ InputOutput,
+ NoStdHandles,
+ };
+
+ pub const Event = union(enum) {
+ Ok,
+ Fail: []errmsg.Msg,
+ Error: BuildError,
+ };
+
pub const DarwinVersionMin = union(enum) {
None,
MacOS: []const u8,
@@ -104,7 +150,7 @@ pub const Module = struct {
};
pub fn create(
- allocator: *mem.Allocator,
+ loop: *event.Loop,
name: []const u8,
root_src_path: ?[]const u8,
target: *const Target,
@@ -113,7 +159,7 @@ pub const Module = struct {
zig_lib_dir: []const u8,
cache_dir: []const u8,
) !*Module {
- var name_buffer = try Buffer.init(allocator, name);
+ var name_buffer = try Buffer.init(loop.allocator, name);
errdefer name_buffer.deinit();
const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
@@ -125,8 +171,12 @@ pub const Module = struct {
const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
- const module_ptr = try allocator.create(Module{
- .allocator = allocator,
+ const events = try event.Channel(Event).create(loop, 0);
+ errdefer events.destroy();
+
+ return loop.allocator.create(Module{
+ .loop = loop,
+ .events = events,
.name = name_buffer,
.root_src_path = root_src_path,
.module = module,
@@ -171,7 +221,7 @@ pub const Module = struct {
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
- .link_libs_list = ArrayList(*LinkLib).init(allocator),
+ .link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
@@ -179,9 +229,8 @@ pub const Module = struct {
.test_filters = [][]const u8{},
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
+ .link_out_file = null,
});
- errdefer allocator.destroy(module_ptr);
- return module_ptr;
}
fn dump(self: *Module) void {
@@ -189,58 +238,70 @@ pub const Module = struct {
}
pub fn destroy(self: *Module) void {
+ self.events.destroy();
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
self.name.deinit();
- self.allocator.destroy(self);
+ self.a().destroy(self);
}
pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
- var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{
+ var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
self.llvm_argv,
});
defer c_compatible_args.deinit();
+ // TODO this sets global state
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
+ _ = try async self.buildAsync();
+ }
+
+ async fn buildAsync(self: *Module) void {
+ while (true) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ // TODO also async before suspending should guarantee memory allocation elision
+ (await (async self.addRootSrc() catch unreachable)) catch |err| {
+ await (async self.events.put(Event{ .Error = err }) catch unreachable);
+ return;
+ };
+ await (async self.events.put(Event.Ok) catch unreachable);
+ }
+ }
+
+ async fn addRootSrc(self: *Module) !void {
const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
- const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
+ const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
};
- errdefer self.allocator.free(root_src_real_path);
+ errdefer self.a().free(root_src_real_path);
- const source_code = io.readFileAlloc(self.allocator, root_src_real_path) catch |err| {
+ const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
try printError("unable to open '{}': {}", root_src_real_path, err);
return err;
};
- errdefer self.allocator.free(source_code);
-
- warn("====input:====\n");
-
- warn("{}", source_code);
+ errdefer self.a().free(source_code);
- warn("====parse:====\n");
-
- var tree = try std.zig.parse(self.allocator, source_code);
+ var tree = try std.zig.parse(self.a(), source_code);
defer tree.deinit();
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
-
- warn("====fmt:====\n");
- _ = try std.zig.render(self.allocator, out_stream, &tree);
-
- warn("====ir:====\n");
- warn("TODO\n\n");
-
- warn("====llvm ir:====\n");
- self.dump();
+ //var it = tree.root_node.decls.iterator();
+ //while (it.next()) |decl_ptr| {
+ // const decl = decl_ptr.*;
+ // switch (decl.id) {
+ // ast.Node.Comptime => @panic("TODO"),
+ // ast.Node.VarDecl => @panic("TODO"),
+ // ast.Node.UseDecl => @panic("TODO"),
+ // ast.Node.FnDef => @panic("TODO"),
+ // ast.Node.TestDecl => @panic("TODO"),
+ // else => unreachable,
+ // }
+ //}
}
pub fn link(self: *Module, out_file: ?[]const u8) !void {
@@ -263,11 +324,11 @@ pub const Module = struct {
}
}
- const link_lib = try self.allocator.create(LinkLib{
+ const link_lib = try self.a().create(LinkLib{
.name = name,
.path = null,
.provided_explicitly = provided_explicitly,
- .symbols = ArrayList([]u8).init(self.allocator),
+ .symbols = ArrayList([]u8).init(self.a()),
});
try self.link_libs_list.append(link_lib);
if (is_libc) {
@@ -275,6 +336,10 @@ pub const Module = struct {
}
return link_lib;
}
+
+ fn a(self: Module) *mem.Allocator {
+ return self.loop.allocator;
+ }
};
fn printError(comptime format: []const u8, args: ...) !void {
diff --git a/std/atomic/queue_mpsc.zig b/std/atomic/queue_mpsc.zig
index 66eb4573df..8030565d7a 100644
--- a/std/atomic/queue_mpsc.zig
+++ b/std/atomic/queue_mpsc.zig
@@ -1,4 +1,4 @@
-const std = @import("std");
+const std = @import("../index.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
diff --git a/std/event.zig b/std/event.zig
index 0821c789b7..7f823bc732 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -4,6 +4,8 @@ const assert = std.debug.assert;
const event = this;
const mem = std.mem;
const posix = std.os.posix;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
pub const TcpServer = struct {
handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
@@ -95,16 +97,29 @@ pub const Loop = struct {
allocator: *mem.Allocator,
epollfd: i32,
keep_running: bool,
+ next_tick_queue: std.atomic.QueueMpsc(promise),
- fn init(allocator: *mem.Allocator) !Loop {
+ pub const NextTickNode = std.atomic.QueueMpsc(promise).Node;
+
+ /// The allocator must be thread-safe because we use it for multiplexing
+ /// coroutines onto kernel threads.
+ pub fn init(allocator: *mem.Allocator) !Loop {
const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
+ errdefer std.os.close(epollfd);
+
return Loop{
.keep_running = true,
.allocator = allocator,
.epollfd = epollfd,
+ .next_tick_queue = std.atomic.QueueMpsc(promise).init(),
};
}
+ /// must call stop before deinit
+ pub fn deinit(self: *Loop) void {
+ std.os.close(self.epollfd);
+ }
+
pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
var ev = std.os.linux.epoll_event{
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
@@ -126,11 +141,21 @@ pub const Loop = struct {
pub fn stop(self: *Loop) void {
// TODO make atomic
self.keep_running = false;
- // TODO activate an fd in the epoll set
+ // TODO activate an fd in the epoll set which should cancel all the promises
+ }
+
+ /// bring your own linked list node. this means it can't fail.
+ pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
+ self.next_tick_queue.put(node);
}
pub fn run(self: *Loop) void {
while (self.keep_running) {
+ // TODO multiplex the next tick queue and the epoll event results onto a thread pool
+ while (self.next_tick_queue.get()) |node| {
+ resume node.data;
+ }
+ if (!self.keep_running) break;
var events: [16]std.os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
for (events[0..count]) |ev| {
@@ -141,6 +166,215 @@ pub const Loop = struct {
}
};
+/// many producer, many consumer, thread-safe, lock-free, runtime configurable buffer size
+/// when buffer is empty, consumers suspend and are resumed by producers
+/// when buffer is full, producers suspend and are resumed by consumers
+pub fn Channel(comptime T: type) type {
+ return struct {
+ loop: *Loop,
+
+ getters: std.atomic.QueueMpsc(GetNode),
+ putters: std.atomic.QueueMpsc(PutNode),
+ get_count: usize,
+ put_count: usize,
+ dispatch_lock: u8, // TODO make this a bool
+ need_dispatch: u8, // TODO make this a bool
+
+ // simple fixed size ring buffer
+ buffer_nodes: []T,
+ buffer_index: usize,
+ buffer_len: usize,
+
+ const SelfChannel = this;
+ const GetNode = struct {
+ ptr: *T,
+ tick_node: *Loop.NextTickNode,
+ };
+ const PutNode = struct {
+ data: T,
+ tick_node: *Loop.NextTickNode,
+ };
+
+ /// call destroy when done
+ pub fn create(loop: *Loop, capacity: usize) !*SelfChannel {
+ const buffer_nodes = try loop.allocator.alloc(T, capacity);
+ errdefer loop.allocator.free(buffer_nodes);
+
+ const self = try loop.allocator.create(SelfChannel{
+ .loop = loop,
+ .buffer_len = 0,
+ .buffer_nodes = buffer_nodes,
+ .buffer_index = 0,
+ .dispatch_lock = 0,
+ .need_dispatch = 0,
+ .getters = std.atomic.QueueMpsc(GetNode).init(),
+ .putters = std.atomic.QueueMpsc(PutNode).init(),
+ .get_count = 0,
+ .put_count = 0,
+ });
+ errdefer loop.allocator.destroy(self);
+
+ return self;
+ }
+
+ /// must be called when all calls to put and get have suspended and no more calls occur
+ pub fn destroy(self: *SelfChannel) void {
+ while (self.getters.get()) |get_node| {
+ cancel get_node.data.tick_node.data;
+ }
+ while (self.putters.get()) |put_node| {
+ cancel put_node.data.tick_node.data;
+ }
+ self.loop.allocator.free(self.buffer_nodes);
+ self.loop.allocator.destroy(self);
+ }
+
+ /// puts a data item in the channel. The promise completes when the value has been added to the
+ /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
+ pub async fn put(self: *SelfChannel, data: T) void {
+ // TODO should be able to group memory allocation failure before first suspend point
+ // so that the async invocation catches it
+ var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
+ _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;
+
+ suspend |handle| {
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle,
+ };
+ var queue_node = std.atomic.QueueMpsc(PutNode).Node{
+ .data = PutNode{
+ .tick_node = &my_tick_node,
+ .data = data,
+ },
+ .next = undefined,
+ };
+ self.putters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ self.loop.onNextTick(dispatch_tick_node_ptr);
+ }
+ }
+
+ /// await this function to get an item from the channel. If the buffer is empty, the promise will
+ /// complete when the next item is put in the channel.
+ pub async fn get(self: *SelfChannel) T {
+ // TODO should be able to group memory allocation failure before first suspend point
+ // so that the async invocation catches it
+ var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
+ _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;
+
+ // TODO integrate this function with named return values
+ // so we can get rid of this extra result copy
+ var result: T = undefined;
+ var debug_handle: usize = undefined;
+ suspend |handle| {
+ debug_handle = @ptrToInt(handle);
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle,
+ };
+ var queue_node = std.atomic.QueueMpsc(GetNode).Node{
+ .data = GetNode{
+ .ptr = &result,
+ .tick_node = &my_tick_node,
+ },
+ .next = undefined,
+ };
+ self.getters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ self.loop.onNextTick(dispatch_tick_node_ptr);
+ }
+ return result;
+ }
+
+ async fn dispatch(self: *SelfChannel, tick_node_ptr: **Loop.NextTickNode) void {
+ // resumed by onNextTick
+ suspend |handle| {
+ var tick_node = Loop.NextTickNode{
+ .data = handle,
+ .next = undefined,
+ };
+ tick_node_ptr.* = &tick_node;
+ }
+
+ // set the "need dispatch" flag
+ _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+
+ lock: while (true) {
+ // set the lock flag
+ const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (prev_lock != 0) return;
+
+ // clear the need_dispatch flag since we're about to do it
+ _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ while (true) {
+ one_dispatch: {
+ // later we correct these extra subtractions
+ var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+
+ // transfer self.buffer to self.getters
+ while (self.buffer_len != 0) {
+ if (get_count == 0) break :one_dispatch;
+
+ const get_node = &self.getters.get().?.data;
+ get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
+ self.loop.onNextTick(get_node.tick_node);
+ self.buffer_len -= 1;
+
+ get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ // direct transfer self.putters to self.getters
+ while (get_count != 0 and put_count != 0) {
+ const get_node = &self.getters.get().?.data;
+ const put_node = &self.putters.get().?.data;
+
+ get_node.ptr.* = put_node.data;
+ self.loop.onNextTick(get_node.tick_node);
+ self.loop.onNextTick(put_node.tick_node);
+
+ get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ // transfer self.putters to self.buffer
+ while (self.buffer_len != self.buffer_nodes.len and put_count != 0) {
+ const put_node = &self.putters.get().?.data;
+
+ self.buffer_nodes[self.buffer_index] = put_node.data;
+ self.loop.onNextTick(put_node.tick_node);
+ self.buffer_index +%= 1;
+ self.buffer_len += 1;
+
+ put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+ }
+
+ // undo the extra subtractions
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ // clear need-dispatch flag
+ const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ if (need_dispatch != 0) continue;
+
+ const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ assert(my_lock != 0);
+
+ // we have to check again now that we unlocked
+ if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock;
+
+ return;
+ }
+ }
+ }
+ };
+}
+
pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
@@ -199,6 +433,7 @@ test "listen on a port, send bytes, receive bytes" {
defer cancel p;
loop.run();
}
+
async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
errdefer @panic("test failure");
@@ -211,3 +446,43 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
assert(mem.eql(u8, msg, "hello from server\n"));
loop.stop();
}
+
+test "std.event.Channel" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop = try Loop.init(allocator);
+ defer loop.deinit();
+
+ const channel = try Channel(i32).create(&loop, 0);
+ defer channel.destroy();
+
+ const handle = try async testChannelGetter(&loop, channel);
+ defer cancel handle;
+
+ const putter = try async testChannelPutter(channel);
+ defer cancel putter;
+
+ loop.run();
+}
+
+async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
+ errdefer @panic("test failed");
+
+ const value1_promise = try async channel.get();
+ const value1 = await value1_promise;
+ assert(value1 == 1234);
+
+ const value2_promise = try async channel.get();
+ const value2 = await value2_promise;
+ assert(value2 == 4567);
+
+ loop.stop();
+}
+
+async fn testChannelPutter(channel: *Channel(i32)) void {
+ await (async channel.put(1234) catch @panic("out of memory"));
+ await (async channel.put(4567) catch @panic("out of memory"));
+}
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index bf12e86fef..c3c17f5322 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -130,6 +130,9 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
+ builtin.TypeId.Promise => {
+ return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
+ },
builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
diff --git a/std/heap.zig b/std/heap.zig
index 41d7802fdd..2e02733da1 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -38,6 +38,7 @@ fn cFree(self: *Allocator, old_mem: []u8) void {
}
/// This allocator makes a syscall directly for every allocation and free.
+/// TODO make this thread-safe. The windows implementation will need some atomics.
pub const DirectAllocator = struct {
allocator: Allocator,
heap_handle: ?HeapHandle,
--
cgit v1.2.3
From 2da999372a1f7848af59b07fe14ef025354f4c51 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 2 Jul 2018 15:25:23 -0400
Subject: add another BuildError code
---
src-self-hosted/module.zig | 1 +
1 file changed, 1 insertion(+)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 4fac760790..c984610257 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -114,6 +114,7 @@ pub const Module = struct {
DiskQuota,
InputOutput,
NoStdHandles,
+ Overflow,
};
pub const Event = union(enum) {
--
cgit v1.2.3
From 06e8c2e5194439ce5b66f18fcf60108604449957 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 2 Jul 2018 17:55:32 -0400
Subject: fix stage2 macos build
See #1173
---
src-self-hosted/module.zig | 1 +
1 file changed, 1 insertion(+)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index c984610257..cf27c826c8 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -115,6 +115,7 @@ pub const Module = struct {
InputOutput,
NoStdHandles,
Overflow,
+ NotSupported,
};
pub const Event = union(enum) {
--
cgit v1.2.3
From eb326e15530dd6dca4ccbe7dbfde7bf048de813e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 5 Jul 2018 15:09:02 -0400
Subject: M:N threading
* add std.atomic.QueueMpsc.isEmpty
* make std.debug.global_allocator thread-safe
* std.event.Loop: now you have to choose between
- initSingleThreaded
- initMultiThreaded
* std.event.Loop multiplexes coroutines onto kernel threads
* Remove std.event.Loop.stop. Instead the event loop run() function
returns once there are no pending coroutines.
* fix crash in ir.cpp for calling methods under some conditions
* small progress self-hosted compiler, analyzing top level declarations
* Introduce std.event.Lock for synchronizing coroutines
* introduce std.event.Locked(T) for data that only 1 coroutine should
modify at once.
* make the self hosted compiler use multi threaded event loop
* make std.heap.DirectAllocator thread-safe
See #174
TODO:
* call sched_getaffinity instead of hard coding thread pool size 4
* support for Windows and MacOS
* #1194
* #1197
---
src-self-hosted/main.zig | 5 +-
src-self-hosted/module.zig | 257 ++++++++++++++++++--
src/ir.cpp | 2 +-
std/atomic/queue_mpsc.zig | 17 ++
std/debug/index.zig | 7 +-
std/event.zig | 580 +++++++++++++++++++++++++++++++++++++++------
std/heap.zig | 30 +--
std/mem.zig | 2 +-
std/os/index.zig | 39 ++-
std/os/linux/index.zig | 8 +
10 files changed, 833 insertions(+), 114 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index d17fc94c82..fe94a4460a 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -384,7 +384,8 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
- var loop = try event.Loop.init(allocator);
+ var loop: event.Loop = undefined;
+ try loop.initMultiThreaded(allocator);
var module = try Module.create(
&loop,
@@ -493,8 +494,6 @@ async fn processBuildEvents(module: *Module, watch: bool) void {
switch (build_event) {
Module.Event.Ok => {
std.debug.warn("Build succeeded\n");
- // for now we stop after 1
- module.loop.stop();
return;
},
Module.Event.Error => |err| {
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index cf27c826c8..5ce1a7965a 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const os = std.os;
const io = std.io;
const mem = std.mem;
+const Allocator = mem.Allocator;
const Buffer = std.Buffer;
const llvm = @import("llvm.zig");
const c = @import("c.zig");
@@ -13,6 +14,7 @@ const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
const ast = std.zig.ast;
const event = std.event;
+const assert = std.debug.assert;
pub const Module = struct {
loop: *event.Loop,
@@ -81,6 +83,8 @@ pub const Module = struct {
link_out_file: ?[]const u8,
events: *event.Channel(Event),
+ exported_symbol_names: event.Locked(Decl.Table),
+
// TODO handle some of these earlier and report them in a way other than error codes
pub const BuildError = error{
OutOfMemory,
@@ -232,6 +236,7 @@ pub const Module = struct {
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
.link_out_file = null,
+ .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
});
}
@@ -272,38 +277,91 @@ pub const Module = struct {
return;
};
await (async self.events.put(Event.Ok) catch unreachable);
+ // for now we stop after 1
+ return;
}
}
async fn addRootSrc(self: *Module) !void {
const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
+ // TODO async/await os.path.real
const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
};
errdefer self.a().free(root_src_real_path);
+ // TODO async/await readFileAlloc()
const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
try printError("unable to open '{}': {}", root_src_real_path, err);
return err;
};
errdefer self.a().free(source_code);
- var tree = try std.zig.parse(self.a(), source_code);
- defer tree.deinit();
-
- //var it = tree.root_node.decls.iterator();
- //while (it.next()) |decl_ptr| {
- // const decl = decl_ptr.*;
- // switch (decl.id) {
- // ast.Node.Comptime => @panic("TODO"),
- // ast.Node.VarDecl => @panic("TODO"),
- // ast.Node.UseDecl => @panic("TODO"),
- // ast.Node.FnDef => @panic("TODO"),
- // ast.Node.TestDecl => @panic("TODO"),
- // else => unreachable,
- // }
- //}
+ var parsed_file = ParsedFile{
+ .tree = try std.zig.parse(self.a(), source_code),
+ .realpath = root_src_real_path,
+ };
+ errdefer parsed_file.tree.deinit();
+
+ const tree = &parsed_file.tree;
+
+ // create empty struct for it
+ const decls = try Scope.Decls.create(self.a(), null);
+ errdefer decls.destroy();
+
+ var it = tree.root_node.decls.iterator(0);
+ while (it.next()) |decl_ptr| {
+ const decl = decl_ptr.*;
+ switch (decl.id) {
+ ast.Node.Id.Comptime => @panic("TODO"),
+ ast.Node.Id.VarDecl => @panic("TODO"),
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+
+ const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
+ @panic("TODO add compile error");
+ //try self.addCompileError(
+ // &parsed_file,
+ // fn_proto.fn_token,
+ // fn_proto.fn_token + 1,
+ // "missing function name",
+ //);
+ continue;
+ };
+
+ const fn_decl = try self.a().create(Decl.Fn{
+ .base = Decl{
+ .id = Decl.Id.Fn,
+ .name = name,
+ .visib = parseVisibToken(tree, fn_proto.visib_token),
+ .resolution = Decl.Resolution.Unresolved,
+ },
+ .value = Decl.Fn.Val{ .Unresolved = {} },
+ .fn_proto = fn_proto,
+ });
+ errdefer self.a().destroy(fn_decl);
+
+ // TODO make this parallel
+ try await try async self.addTopLevelDecl(tree, &fn_decl.base);
+ },
+ ast.Node.Id.TestDecl => @panic("TODO"),
+ else => unreachable,
+ }
+ }
+ }
+
+ async fn addTopLevelDecl(self: *Module, tree: *ast.Tree, decl: *Decl) !void {
+ const is_export = decl.isExported(tree);
+
+ {
+ const exported_symbol_names = await try async self.exported_symbol_names.acquire();
+ defer exported_symbol_names.release();
+
+ if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
+ @panic("TODO report compile error");
+ }
+ }
}
pub fn link(self: *Module, out_file: ?[]const u8) !void {
@@ -350,3 +408,172 @@ fn printError(comptime format: []const u8, args: ...) !void {
const out_stream = &stderr_file_out_stream.stream;
try out_stream.print(format, args);
}
+
+fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
+ if (optional_token_index) |token_index| {
+ const token = tree.tokens.at(token_index);
+ assert(token.id == Token.Id.Keyword_pub);
+ return Visib.Pub;
+ } else {
+ return Visib.Private;
+ }
+}
+
+pub const Scope = struct {
+ id: Id,
+ parent: ?*Scope,
+
+ pub const Id = enum {
+ Decls,
+ Block,
+ };
+
+ pub const Decls = struct {
+ base: Scope,
+ table: Decl.Table,
+
+ pub fn create(a: *Allocator, parent: ?*Scope) !*Decls {
+ const self = try a.create(Decls{
+ .base = Scope{
+ .id = Id.Decls,
+ .parent = parent,
+ },
+ .table = undefined,
+ });
+ errdefer a.destroy(self);
+
+ self.table = Decl.Table.init(a);
+ errdefer self.table.deinit();
+
+ return self;
+ }
+
+ pub fn destroy(self: *Decls) void {
+ self.table.deinit();
+ self.table.allocator.destroy(self);
+ self.* = undefined;
+ }
+ };
+
+ pub const Block = struct {
+ base: Scope,
+ };
+};
+
+pub const Visib = enum {
+ Private,
+ Pub,
+};
+
+pub const Decl = struct {
+ id: Id,
+ name: []const u8,
+ visib: Visib,
+ resolution: Resolution,
+
+ pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ return fn_decl.isExported(tree);
+ },
+ else => return false,
+ }
+ }
+
+ pub const Resolution = enum {
+ Unresolved,
+ InProgress,
+ Invalid,
+ Ok,
+ };
+
+ pub const Id = enum {
+ Var,
+ Fn,
+ CompTime,
+ };
+
+ pub const Var = struct {
+ base: Decl,
+ };
+
+ pub const Fn = struct {
+ base: Decl,
+ value: Val,
+ fn_proto: *const ast.Node.FnProto,
+
+ // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
+ pub const Val = union {
+ Unresolved: void,
+ Ok: *Value.Fn,
+ };
+
+ pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
+ return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
+ const token = tree.tokens.at(tok_index);
+ break :x switch (token.id) {
+ Token.Id.Extern => tree.tokenSlicePtr(token),
+ else => null,
+ };
+ } else null;
+ }
+
+ pub fn isExported(self: Fn, tree: *ast.Tree) bool {
+ if (self.fn_proto.extern_export_inline_token) |tok_index| {
+ const token = tree.tokens.at(tok_index);
+ return token.id == Token.Id.Keyword_export;
+ } else {
+ return false;
+ }
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Decl,
+ };
+};
+
+pub const Value = struct {
+ pub const Fn = struct {};
+};
+
+pub const Type = struct {
+ id: Id,
+
+ pub const Id = enum {
+ Type,
+ Void,
+ Bool,
+ NoReturn,
+ Int,
+ Float,
+ Pointer,
+ Array,
+ Struct,
+ ComptimeFloat,
+ ComptimeInt,
+ Undefined,
+ Null,
+ Optional,
+ ErrorUnion,
+ ErrorSet,
+ Enum,
+ Union,
+ Fn,
+ Opaque,
+ Promise,
+ };
+
+ pub const Struct = struct {
+ base: Type,
+ decls: *Scope.Decls,
+ };
+};
+
+pub const ParsedFile = struct {
+ tree: ast.Tree,
+ realpath: []const u8,
+};
diff --git a/src/ir.cpp b/src/ir.cpp
index 98b1bd85ad..3fc8306339 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -13278,7 +13278,7 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction
FnTableEntry *fn_table_entry = fn_ref->value.data.x_bound_fn.fn;
IrInstruction *first_arg_ptr = fn_ref->value.data.x_bound_fn.first_arg;
return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
- nullptr, first_arg_ptr, is_comptime, call_instruction->fn_inline);
+ fn_ref, first_arg_ptr, is_comptime, call_instruction->fn_inline);
} else {
ir_add_error_node(ira, fn_ref->source_node,
buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value.type->name)));
diff --git a/std/atomic/queue_mpsc.zig b/std/atomic/queue_mpsc.zig
index 8030565d7a..bc0a94258b 100644
--- a/std/atomic/queue_mpsc.zig
+++ b/std/atomic/queue_mpsc.zig
@@ -15,6 +15,8 @@ pub fn QueueMpsc(comptime T: type) type {
pub const Node = std.atomic.Stack(T).Node;
+ /// Not thread-safe. The call to init() must complete before any other functions are called.
+ /// No deinitialization required.
pub fn init() Self {
return Self{
.inboxes = []std.atomic.Stack(T){
@@ -26,12 +28,15 @@ pub fn QueueMpsc(comptime T: type) type {
};
}
+ /// Fully thread-safe. put() may be called from any thread at any time.
pub fn put(self: *Self, node: *Node) void {
const inbox_index = @atomicLoad(usize, &self.inbox_index, AtomicOrder.SeqCst);
const inbox = &self.inboxes[inbox_index];
inbox.push(node);
}
+ /// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before
+ /// the next call to get().
pub fn get(self: *Self) ?*Node {
if (self.outbox.pop()) |node| {
return node;
@@ -43,6 +48,18 @@ pub fn QueueMpsc(comptime T: type) type {
}
return self.outbox.pop();
}
+
+ /// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before
+ /// the next call to isEmpty().
+ pub fn isEmpty(self: *Self) bool {
+ if (!self.outbox.isEmpty()) return false;
+ const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
+ const prev_inbox = &self.inboxes[prev_inbox_index];
+ while (prev_inbox.pop()) |node| {
+ self.outbox.push(node);
+ }
+ return self.outbox.isEmpty();
+ }
};
}
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 57b2dfc300..a5e1c313f0 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -11,6 +11,11 @@ const builtin = @import("builtin");
pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator;
+pub const runtime_safety = switch (builtin.mode) {
+ builtin.Mode.Debug, builtin.Mode.ReleaseSafe => true,
+ builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => false,
+};
+
/// Tries to write to stderr, unbuffered, and ignores any error returned.
/// Does not append a newline.
/// TODO atomic/multithread support
@@ -1098,7 +1103,7 @@ fn readILeb128(in_stream: var) !i64 {
/// This should only be used in temporary test programs.
pub const global_allocator = &global_fixed_allocator.allocator;
-var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator_mem[0..]);
+var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined;
// TODO make thread safe
diff --git a/std/event.zig b/std/event.zig
index c6ac04a9d0..2d69d0cb16 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -11,53 +11,69 @@ pub const TcpServer = struct {
handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
loop: *Loop,
- sockfd: i32,
+ sockfd: ?i32,
accept_coro: ?promise,
listen_address: std.net.Address,
waiting_for_emfile_node: PromiseNode,
+ listen_resume_node: event.Loop.ResumeNode,
const PromiseNode = std.LinkedList(promise).Node;
- pub fn init(loop: *Loop) !TcpServer {
- const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
- errdefer std.os.close(sockfd);
-
+ pub fn init(loop: *Loop) TcpServer {
// TODO can't initialize handler coroutine here because we need well defined copy elision
return TcpServer{
.loop = loop,
- .sockfd = sockfd,
+ .sockfd = null,
.accept_coro = null,
.handleRequestFn = undefined,
.waiting_for_emfile_node = undefined,
.listen_address = undefined,
+ .listen_resume_node = event.Loop.ResumeNode{
+ .id = event.Loop.ResumeNode.Id.Basic,
+ .handle = undefined,
+ },
};
}
- pub fn listen(self: *TcpServer, address: *const std.net.Address, handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void) !void {
+ pub fn listen(
+ self: *TcpServer,
+ address: *const std.net.Address,
+ handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
+ ) !void {
self.handleRequestFn = handleRequestFn;
- try std.os.posixBind(self.sockfd, &address.os_addr);
- try std.os.posixListen(self.sockfd, posix.SOMAXCONN);
- self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(self.sockfd));
+ const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
+ errdefer std.os.close(sockfd);
+ self.sockfd = sockfd;
+
+ try std.os.posixBind(sockfd, &address.os_addr);
+ try std.os.posixListen(sockfd, posix.SOMAXCONN);
+ self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(sockfd));
self.accept_coro = try async TcpServer.handler(self);
errdefer cancel self.accept_coro.?;
- try self.loop.addFd(self.sockfd, self.accept_coro.?);
- errdefer self.loop.removeFd(self.sockfd);
+ self.listen_resume_node.handle = self.accept_coro.?;
+ try self.loop.addFd(sockfd, &self.listen_resume_node);
+ errdefer self.loop.removeFd(sockfd);
+ }
+
+ /// Stop listening
+ pub fn close(self: *TcpServer) void {
+ self.loop.removeFd(self.sockfd.?);
+ std.os.close(self.sockfd.?);
}
pub fn deinit(self: *TcpServer) void {
- self.loop.removeFd(self.sockfd);
if (self.accept_coro) |accept_coro| cancel accept_coro;
- std.os.close(self.sockfd);
+ if (self.sockfd) |sockfd| std.os.close(sockfd);
}
pub async fn handler(self: *TcpServer) void {
while (true) {
var accepted_addr: std.net.Address = undefined;
- if (std.os.posixAccept(self.sockfd, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| {
+ if (std.os.posixAccept(self.sockfd.?, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| {
var socket = std.os.File.openHandle(accepted_fd);
_ = async self.handleRequestFn(self, accepted_addr, socket) catch |err| switch (err) {
error.OutOfMemory => {
@@ -95,32 +111,65 @@ pub const TcpServer = struct {
pub const Loop = struct {
allocator: *mem.Allocator,
- keep_running: bool,
next_tick_queue: std.atomic.QueueMpsc(promise),
os_data: OsData,
+ dispatch_lock: u8, // TODO make this a bool
+ pending_event_count: usize,
+ extra_threads: []*std.os.Thread,
+ final_resume_node: ResumeNode,
- const OsData = switch (builtin.os) {
- builtin.Os.linux => struct {
- epollfd: i32,
- },
- else => struct {},
+ pub const NextTickNode = std.atomic.QueueMpsc(promise).Node;
+
+ pub const ResumeNode = struct {
+ id: Id,
+ handle: promise,
+
+ pub const Id = enum {
+ Basic,
+ Stop,
+ EventFd,
+ };
+
+ pub const EventFd = struct {
+ base: ResumeNode,
+ eventfd: i32,
+ };
};
- pub const NextTickNode = std.atomic.QueueMpsc(promise).Node;
+ /// After initialization, call run().
+ /// TODO copy elision / named return values so that the threads referencing *Loop
+ /// have the correct pointer value.
+ fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ return self.initInternal(allocator, 1);
+ }
/// The allocator must be thread-safe because we use it for multiplexing
/// coroutines onto kernel threads.
- pub fn init(allocator: *mem.Allocator) !Loop {
- var self = Loop{
- .keep_running = true,
+ /// After initialization, call run().
+ /// TODO copy elision / named return values so that the threads referencing *Loop
+ /// have the correct pointer value.
+ fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ // TODO check the actual cpu core count
+ return self.initInternal(allocator, 4);
+ }
+
+ /// Thread count is the total thread count. The thread pool size will be
+ /// max(thread_count - 1, 0)
+ fn initInternal(self: *Loop, allocator: *mem.Allocator, thread_count: usize) !void {
+ self.* = Loop{
+ .pending_event_count = 0,
.allocator = allocator,
.os_data = undefined,
.next_tick_queue = std.atomic.QueueMpsc(promise).init(),
+ .dispatch_lock = 1, // start locked so threads go directly into epoll wait
+ .extra_threads = undefined,
+ .final_resume_node = ResumeNode{
+ .id = ResumeNode.Id.Stop,
+ .handle = undefined,
+ },
};
- try self.initOsData();
+ try self.initOsData(thread_count);
errdefer self.deinitOsData();
-
- return self;
}
/// must call stop before deinit
@@ -128,13 +177,70 @@ pub const Loop = struct {
self.deinitOsData();
}
- const InitOsDataError = std.os.LinuxEpollCreateError;
+ const InitOsDataError = std.os.LinuxEpollCreateError || mem.Allocator.Error || std.os.LinuxEventFdError ||
+ std.os.SpawnThreadError || std.os.LinuxEpollCtlError;
+
+ const wakeup_bytes = []u8{0x1} ** 8;
- fn initOsData(self: *Loop) InitOsDataError!void {
+ fn initOsData(self: *Loop, thread_count: usize) InitOsDataError!void {
switch (builtin.os) {
builtin.Os.linux => {
- self.os_data.epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
+ const extra_thread_count = thread_count - 1;
+ self.os_data.available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init();
+ self.os_data.eventfd_resume_nodes = try self.allocator.alloc(
+ std.atomic.Stack(ResumeNode.EventFd).Node,
+ extra_thread_count,
+ );
+ errdefer self.allocator.free(self.os_data.eventfd_resume_nodes);
+
+ errdefer {
+ while (self.os_data.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
+ }
+ for (self.os_data.eventfd_resume_nodes) |*eventfd_node| {
+ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
+ .data = ResumeNode.EventFd{
+ .base = ResumeNode{
+ .id = ResumeNode.Id.EventFd,
+ .handle = undefined,
+ },
+ .eventfd = try std.os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
+ },
+ .next = undefined,
+ };
+ self.os_data.available_eventfd_resume_nodes.push(eventfd_node);
+ }
+
+ self.os_data.epollfd = try std.os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
errdefer std.os.close(self.os_data.epollfd);
+
+ self.os_data.final_eventfd = try std.os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
+ errdefer std.os.close(self.os_data.final_eventfd);
+
+ self.os_data.final_eventfd_event = posix.epoll_event{
+ .events = posix.EPOLLIN,
+ .data = posix.epoll_data{ .ptr = @ptrToInt(&self.final_resume_node) },
+ };
+ try std.os.linuxEpollCtl(
+ self.os_data.epollfd,
+ posix.EPOLL_CTL_ADD,
+ self.os_data.final_eventfd,
+ &self.os_data.final_eventfd_event,
+ );
+ self.extra_threads = try self.allocator.alloc(*std.os.Thread, extra_thread_count);
+ errdefer self.allocator.free(self.extra_threads);
+
+ var extra_thread_index: usize = 0;
+ errdefer {
+ while (extra_thread_index != 0) {
+ extra_thread_index -= 1;
+ // writing 8 bytes to an eventfd cannot fail
+ std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
+ self.extra_threads[extra_thread_index].wait();
+ }
+ }
+ while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
+ self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ }
},
else => {},
}
@@ -142,65 +248,154 @@ pub const Loop = struct {
fn deinitOsData(self: *Loop) void {
switch (builtin.os) {
- builtin.Os.linux => std.os.close(self.os_data.epollfd),
+ builtin.Os.linux => {
+ std.os.close(self.os_data.final_eventfd);
+ while (self.os_data.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
+ std.os.close(self.os_data.epollfd);
+ self.allocator.free(self.os_data.eventfd_resume_nodes);
+ self.allocator.free(self.extra_threads);
+ },
else => {},
}
}
- pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
+ /// resume_node must live longer than the promise that it holds a reference to.
+ pub fn addFd(self: *Loop, fd: i32, resume_node: *ResumeNode) !void {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ errdefer {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+ try self.addFdNoCounter(fd, resume_node);
+ }
+
+ fn addFdNoCounter(self: *Loop, fd: i32, resume_node: *ResumeNode) !void {
var ev = std.os.linux.epoll_event{
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
- .data = std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) },
+ .data = std.os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
};
try std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
}
pub fn removeFd(self: *Loop, fd: i32) void {
+ self.removeFdNoCounter(fd);
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ fn removeFdNoCounter(self: *Loop, fd: i32) void {
std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
}
- async fn waitFd(self: *Loop, fd: i32) !void {
+
+ pub async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
+ var resume_node = ResumeNode{
+ .id = ResumeNode.Id.Basic,
+ .handle = undefined,
+ };
suspend |p| {
- try self.addFd(fd, p);
+ resume_node.handle = p;
+ try self.addFd(fd, &resume_node);
}
+ var a = &resume_node; // TODO better way to explicitly put memory in coro frame
}
- pub fn stop(self: *Loop) void {
- // TODO make atomic
- self.keep_running = false;
- // TODO activate an fd in the epoll set which should cancel all the promises
- }
-
- /// bring your own linked list node. this means it can't fail.
+ /// Bring your own linked list node. This means it can't fail.
pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.next_tick_queue.put(node);
}
pub fn run(self: *Loop) void {
- while (self.keep_running) {
- // TODO multiplex the next tick queue and the epoll event results onto a thread pool
- while (self.next_tick_queue.get()) |node| {
- resume node.data;
- }
- if (!self.keep_running) break;
-
- self.dispatchOsEvents();
+ _ = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ self.workerRun();
+ for (self.extra_threads) |extra_thread| {
+ extra_thread.wait();
}
}
- fn dispatchOsEvents(self: *Loop) void {
- switch (builtin.os) {
- builtin.Os.linux => {
- var events: [16]std.os.linux.epoll_event = undefined;
- const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
- for (events[0..count]) |ev| {
- const p = @intToPtr(promise, ev.data.ptr);
- resume p;
+ fn workerRun(self: *Loop) void {
+ start_over: while (true) {
+ if (@atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
+ while (self.next_tick_queue.get()) |next_tick_node| {
+ const handle = next_tick_node.data;
+ if (self.next_tick_queue.isEmpty()) {
+ // last node, just resume it
+ _ = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ resume handle;
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ continue :start_over;
+ }
+
+ // non-last node, stick it in the epoll set so that
+ // other threads can get to it
+ if (self.os_data.available_eventfd_resume_nodes.pop()) |resume_stack_node| {
+ const eventfd_node = &resume_stack_node.data;
+ eventfd_node.base.handle = handle;
+ // the pending count is already accounted for
+ self.addFdNoCounter(eventfd_node.eventfd, &eventfd_node.base) catch |_| {
+ // fine, we didn't need it anyway
+ _ = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ self.os_data.available_eventfd_resume_nodes.push(resume_stack_node);
+ resume handle;
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ continue :start_over;
+ };
+ } else {
+ // threads are too busy, can't add another eventfd to wake one up
+ _ = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ resume handle;
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ continue :start_over;
+ }
}
- },
- else => {},
+
+ const pending_event_count = @atomicLoad(usize, &self.pending_event_count, AtomicOrder.SeqCst);
+ if (pending_event_count == 0) {
+ // cause all the threads to stop
+ // writing 8 bytes to an eventfd cannot fail
+ std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
+ return;
+ }
+
+ _ = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ }
+
+ // only process 1 event so we don't steal from other threads
+ var events: [1]std.os.linux.epoll_event = undefined;
+ const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
+ for (events[0..count]) |ev| {
+ const resume_node = @intToPtr(*ResumeNode, ev.data.ptr);
+ const handle = resume_node.handle;
+ const resume_node_id = resume_node.id;
+ switch (resume_node_id) {
+ ResumeNode.Id.Basic => {},
+ ResumeNode.Id.Stop => return,
+ ResumeNode.Id.EventFd => {
+ const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
+ self.removeFdNoCounter(event_fd_node.eventfd);
+ const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
+ self.os_data.available_eventfd_resume_nodes.push(stack_node);
+ },
+ }
+ resume handle;
+ if (resume_node_id == ResumeNode.Id.EventFd) {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+ }
}
}
+
+ const OsData = switch (builtin.os) {
+ builtin.Os.linux => struct {
+ epollfd: i32,
+ // pre-allocated eventfds. all permanently active.
+ // this is how we send promises to be resumed on other threads.
+ available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
+ eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
+ final_eventfd: i32,
+ final_eventfd_event: posix.epoll_event,
+ },
+ else => struct {},
+ };
};
/// many producer, many consumer, thread-safe, lock-free, runtime configurable buffer size
@@ -304,9 +499,7 @@ pub fn Channel(comptime T: type) type {
// TODO integrate this function with named return values
// so we can get rid of this extra result copy
var result: T = undefined;
- var debug_handle: usize = undefined;
suspend |handle| {
- debug_handle = @ptrToInt(handle);
var my_tick_node = Loop.NextTickNode{
.next = undefined,
.data = handle,
@@ -438,9 +631,8 @@ test "listen on a port, send bytes, receive bytes" {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
defer socket.close();
- const next_handler = async errorableHandler(self, _addr, socket) catch |err| switch (err) {
- error.OutOfMemory => @panic("unable to handle connection: out of memory"),
- };
+ // TODO guarantee elision of this allocation
+ const next_handler = async errorableHandler(self, _addr, socket) catch unreachable;
(await next_handler) catch |err| {
std.debug.panic("unable to handle connection: {}\n", err);
};
@@ -461,17 +653,18 @@ test "listen on a port, send bytes, receive bytes" {
const ip4addr = std.net.parseIp4("127.0.0.1") catch unreachable;
const addr = std.net.Address.initIp4(ip4addr, 0);
- var loop = try Loop.init(std.debug.global_allocator);
- var server = MyServer{ .tcp_server = try TcpServer.init(&loop) };
+ var loop: Loop = undefined;
+ try loop.initSingleThreaded(std.debug.global_allocator);
+ var server = MyServer{ .tcp_server = TcpServer.init(&loop) };
defer server.tcp_server.deinit();
try server.tcp_server.listen(addr, MyServer.handler);
- const p = try async doAsyncTest(&loop, server.tcp_server.listen_address);
+ const p = try async doAsyncTest(&loop, server.tcp_server.listen_address, &server.tcp_server);
defer cancel p;
loop.run();
}
-async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
+async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *TcpServer) void {
errdefer @panic("test failure");
var socket_file = try await try async event.connect(loop, address);
@@ -481,7 +674,7 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
const amt_read = try socket_file.read(buf[0..]);
const msg = buf[0..amt_read];
assert(mem.eql(u8, msg, "hello from server\n"));
- loop.stop();
+ server.close();
}
test "std.event.Channel" {
@@ -490,7 +683,9 @@ test "std.event.Channel" {
const allocator = &da.allocator;
- var loop = try Loop.init(allocator);
+ var loop: Loop = undefined;
+ // TODO make a multi threaded test
+ try loop.initSingleThreaded(allocator);
defer loop.deinit();
const channel = try Channel(i32).create(&loop, 0);
@@ -515,11 +710,248 @@ async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
const value2_promise = try async channel.get();
const value2 = await value2_promise;
assert(value2 == 4567);
-
- loop.stop();
}
async fn testChannelPutter(channel: *Channel(i32)) void {
await (async channel.put(1234) catch @panic("out of memory"));
await (async channel.put(4567) catch @panic("out of memory"));
}
+
+/// Thread-safe async/await lock.
+/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
+/// are resumed when the lock is released, in order.
+pub const Lock = struct {
+ loop: *Loop,
+ shared_bit: u8, // TODO make this a bool
+ queue: Queue,
+ queue_empty_bit: u8, // TODO make this a bool
+
+ const Queue = std.atomic.QueueMpsc(promise);
+
+ pub const Held = struct {
+ lock: *Lock,
+
+ pub fn release(self: Held) void {
+ // Resume the next item from the queue.
+ if (self.lock.queue.get()) |node| {
+ self.lock.loop.onNextTick(node);
+ return;
+ }
+
+ // We need to release the lock.
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // There might be a queue item. If we know the queue is empty, we can be done,
+ // because the other actor will try to obtain the lock.
+ // But if there's a queue item, we are the actor which must loop and attempt
+ // to grab the lock again.
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ return;
+ }
+
+ while (true) {
+ const old_bit = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (old_bit != 0) {
+ // We did not obtain the lock. Great, the queue is someone else's problem.
+ return;
+ }
+
+ // Resume the next item from the queue.
+ if (self.lock.queue.get()) |node| {
+ self.lock.loop.onNextTick(node);
+ return;
+ }
+
+ // Release the lock again.
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // Find out if we can be done.
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ return;
+ }
+ }
+ }
+ };
+
+ pub fn init(loop: *Loop) Lock {
+ return Lock{
+ .loop = loop,
+ .shared_bit = 0,
+ .queue = Queue.init(),
+ .queue_empty_bit = 1,
+ };
+ }
+
+ /// Must be called when not locked. Not thread safe.
+ /// All calls to acquire() and release() must complete before calling deinit().
+ pub fn deinit(self: *Lock) void {
+ assert(self.shared_bit == 0);
+ while (self.queue.get()) |node| cancel node.data;
+ }
+
+ pub async fn acquire(self: *Lock) Held {
+ var my_tick_node: Loop.NextTickNode = undefined;
+
+ s: suspend |handle| {
+ my_tick_node.data = handle;
+ self.queue.put(&my_tick_node);
+
+ // At this point, we are in the queue, so we might have already been resumed and this coroutine
+ // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+
+ // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
+ // will attempt to grab the lock.
+ _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ while (true) {
+ const old_bit = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (old_bit != 0) {
+ // We did not obtain the lock. Trust that our queue entry will resume us, and allow
+ // suspend to complete.
+ break;
+ }
+ // We got the lock. However we might have already been resumed from the queue.
+ if (self.queue.get()) |node| {
+ // Whether this node is us or someone else, we tail resume it.
+ resume node.data;
+ break;
+ } else {
+ // We already got resumed, and there are none left in the queue, which means that
+ // we aren't even supposed to hold the lock right now.
+ _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // There might be a queue item. If we know the queue is empty, we can be done,
+ // because the other actor will try to obtain the lock.
+ // But if there's a queue item, we are the actor which must loop and attempt
+ // to grab the lock again.
+ if (@atomicLoad(u8, &self.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ break;
+ } else {
+ continue;
+ }
+ }
+ unreachable;
+ }
+ }
+
+ // TODO this workaround to force my_tick_node to be in the coroutine frame should
+ // not be necessary
+ var trash1 = &my_tick_node;
+
+ return Held{ .lock = self };
+ }
+};
+
+/// Thread-safe async/await lock that protects one piece of data.
+/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
+/// are resumed when the lock is released, in order.
+pub fn Locked(comptime T: type) type {
+ return struct {
+ lock: Lock,
+ private_data: T,
+
+ const Self = this;
+
+ pub const HeldLock = struct {
+ value: *T,
+ held: Lock.Held,
+
+ pub fn release(self: HeldLock) void {
+ self.held.release();
+ }
+ };
+
+ pub fn init(loop: *Loop, data: T) Self {
+ return Self{
+ .lock = Lock.init(loop),
+ .private_data = data,
+ };
+ }
+
+ pub fn deinit(self: *Self) void {
+ self.lock.deinit();
+ }
+
+ pub async fn acquire(self: *Self) HeldLock {
+ return HeldLock{
+ // TODO guaranteed allocation elision
+ .held = await (async self.lock.acquire() catch unreachable),
+ .value = &self.private_data,
+ };
+ }
+ };
+}
+
+test "std.event.Lock" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var lock = Lock.init(&loop);
+ defer lock.deinit();
+
+ const handle = try async testLock(&loop, &lock);
+ defer cancel handle;
+ loop.run();
+
+ assert(mem.eql(i32, shared_test_data, [1]i32{3 * 10} ** 10));
+}
+
+async fn testLock(loop: *Loop, lock: *Lock) void {
+ const handle1 = async lockRunner(lock) catch @panic("out of memory");
+ var tick_node1 = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle1,
+ };
+ loop.onNextTick(&tick_node1);
+
+ const handle2 = async lockRunner(lock) catch @panic("out of memory");
+ var tick_node2 = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle2,
+ };
+ loop.onNextTick(&tick_node2);
+
+ const handle3 = async lockRunner(lock) catch @panic("out of memory");
+ var tick_node3 = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle3,
+ };
+ loop.onNextTick(&tick_node3);
+
+ await handle1;
+ await handle2;
+ await handle3;
+
+ // TODO this is to force tick node memory to be in the coro frame
+ // there should be a way to make it explicit where the memory is
+ var a = &tick_node1;
+ var b = &tick_node2;
+ var c = &tick_node3;
+}
+
+var shared_test_data = [1]i32{0} ** 10;
+var shared_test_index: usize = 0;
+
+async fn lockRunner(lock: *Lock) void {
+ suspend; // resumed by onNextTick
+
+ var i: usize = 0;
+ while (i < 10) : (i += 1) {
+ const handle = await (async lock.acquire() catch @panic("out of memory"));
+ defer handle.release();
+
+ shared_test_index = 0;
+ while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
+ shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
+ }
+ }
+}
diff --git a/std/heap.zig b/std/heap.zig
index 2e02733da1..bcace34afe 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -38,7 +38,7 @@ fn cFree(self: *Allocator, old_mem: []u8) void {
}
/// This allocator makes a syscall directly for every allocation and free.
-/// TODO make this thread-safe. The windows implementation will need some atomics.
+/// Thread-safe and lock-free.
pub const DirectAllocator = struct {
allocator: Allocator,
heap_handle: ?HeapHandle,
@@ -74,34 +74,34 @@ pub const DirectAllocator = struct {
const alloc_size = if (alignment <= os.page_size) n else n + alignment;
const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
if (addr == p.MAP_FAILED) return error.OutOfMemory;
-
if (alloc_size == n) return @intToPtr([*]u8, addr)[0..n];
- var aligned_addr = addr & ~usize(alignment - 1);
- aligned_addr += alignment;
+ const aligned_addr = (addr & ~usize(alignment - 1)) + alignment;
- //We can unmap the unused portions of our mmap, but we must only
- // pass munmap bytes that exist outside our allocated pages or it
- // will happily eat us too
+ // We can unmap the unused portions of our mmap, but we must only
+ // pass munmap bytes that exist outside our allocated pages or it
+ // will happily eat us too.
- //Since alignment > page_size, we are by definition on a page boundry
+ // Since alignment > page_size, we are by definition on a page boundary.
const unused_start = addr;
const unused_len = aligned_addr - 1 - unused_start;
- var err = p.munmap(unused_start, unused_len);
- debug.assert(p.getErrno(err) == 0);
+ const err = p.munmap(unused_start, unused_len);
+ assert(p.getErrno(err) == 0);
- //It is impossible that there is an unoccupied page at the top of our
- // mmap.
+ // It is impossible that there is an unoccupied page at the top of our
+ // mmap.
return @intToPtr([*]u8, aligned_addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
- const heap_handle = self.heap_handle orelse blk: {
+ const optional_heap_handle = @atomicLoad(?HeapHandle, ?self.heap_handle, builtin.AtomicOrder.SeqCst);
+ const heap_handle = optional_heap_handle orelse blk: {
const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) orelse return error.OutOfMemory;
- self.heap_handle = hh;
- break :blk hh;
+ const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
+ _ = os.windows.HeapDestroy(hh);
+ break :blk other_hh;
};
const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
diff --git a/std/mem.zig b/std/mem.zig
index b52d3e9f68..555e1e249d 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -6,7 +6,7 @@ const builtin = @import("builtin");
const mem = this;
pub const Allocator = struct {
- const Error = error{OutOfMemory};
+ pub const Error = error{OutOfMemory};
/// Allocate byte_count bytes and return them in a slice, with the
/// slice's pointer aligned at least to alignment bytes.
diff --git a/std/os/index.zig b/std/os/index.zig
index 52b36c351c..74a1b64f6e 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -2309,6 +2309,30 @@ pub fn linuxEpollWait(epfd: i32, events: []linux.epoll_event, timeout: i32) usiz
}
}
+pub const LinuxEventFdError = error{
+ InvalidFlagValue,
+ SystemResources,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+
+ Unexpected,
+};
+
+pub fn linuxEventFd(initval: u32, flags: u32) LinuxEventFdError!i32 {
+ const rc = posix.eventfd(initval, flags);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return @intCast(i32, rc),
+ else => return unexpectedErrorPosix(err),
+
+ posix.EINVAL => return LinuxEventFdError.InvalidFlagValue,
+ posix.EMFILE => return LinuxEventFdError.ProcessFdQuotaExceeded,
+ posix.ENFILE => return LinuxEventFdError.SystemFdQuotaExceeded,
+ posix.ENODEV => return LinuxEventFdError.SystemResources,
+ posix.ENOMEM => return LinuxEventFdError.SystemResources,
+ }
+}
+
pub const PosixGetSockNameError = error{
/// Insufficient resources were available in the system to perform the operation.
SystemResources,
@@ -2605,10 +2629,17 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
const MainFuncs = struct {
extern fn linuxThreadMain(ctx_addr: usize) u8 {
- if (@sizeOf(Context) == 0) {
- return startFn({});
- } else {
- return startFn(@intToPtr(*const Context, ctx_addr).*);
+ const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;
+
+ switch (@typeId(@typeOf(startFn).ReturnType)) {
+ builtin.TypeId.Int => {
+ return startFn(arg);
+ },
+ builtin.TypeId.Void => {
+ startFn(arg);
+ return 0;
+ },
+ else => @compileError("expected return type of startFn to be 'u8', 'noreturn', 'void', or '!void'"),
}
}
extern fn posixThreadMain(ctx: ?*c_void) ?*c_void {
diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig
index 65aa659c82..1c15be4887 100644
--- a/std/os/linux/index.zig
+++ b/std/os/linux/index.zig
@@ -523,6 +523,10 @@ pub const CLONE_NEWPID = 0x20000000;
pub const CLONE_NEWNET = 0x40000000;
pub const CLONE_IO = 0x80000000;
+pub const EFD_SEMAPHORE = 1;
+pub const EFD_CLOEXEC = O_CLOEXEC;
+pub const EFD_NONBLOCK = O_NONBLOCK;
+
pub const MS_RDONLY = 1;
pub const MS_NOSUID = 2;
pub const MS_NODEV = 4;
@@ -1221,6 +1225,10 @@ pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout
return syscall4(SYS_epoll_wait, @intCast(usize, epoll_fd), @ptrToInt(events), @intCast(usize, maxevents), @intCast(usize, timeout));
}
+pub fn eventfd(count: u32, flags: u32) usize {
+ return syscall2(SYS_eventfd2, count, flags);
+}
+
pub fn timerfd_create(clockid: i32, flags: u32) usize {
return syscall2(SYS_timerfd_create, @intCast(usize, clockid), @intCast(usize, flags));
}
--
cgit v1.2.3
From 8fba0a6ae862993afa2aeca774347adc399b3605 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 10 Jul 2018 15:17:01 -0400
Subject: introduce std.event.Group for making parallel async calls
---
CMakeLists.txt | 1 +
src-self-hosted/module.zig | 36 ++++++++---
std/event.zig | 2 +
std/event/group.zig | 158 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 189 insertions(+), 8 deletions(-)
create mode 100644 std/event/group.zig
(limited to 'src-self-hosted/module.zig')
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fdedcd5eec..eeb0ec2058 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -459,6 +459,7 @@ set(ZIG_STD_FILES
"empty.zig"
"event.zig"
"event/channel.zig"
+ "event/group.zig"
"event/lock.zig"
"event/locked.zig"
"event/loop.zig"
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 5ce1a7965a..24be228eb8 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -85,6 +85,17 @@ pub const Module = struct {
exported_symbol_names: event.Locked(Decl.Table),
+ /// Before code generation starts, must wait on this group to make sure
+ /// the build is complete.
+ build_group: event.Group(BuildError!void),
+
+ const BuildErrorsList = std.SegmentedList(BuildErrorDesc, 1);
+
+ pub const BuildErrorDesc = struct {
+ code: BuildError,
+ text: []const u8,
+ };
+
// TODO handle some of these earlier and report them in a way other than error codes
pub const BuildError = error{
OutOfMemory,
@@ -237,6 +248,7 @@ pub const Module = struct {
.emit_file_type = Emit.Binary,
.link_out_file = null,
.exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
+ .build_group = event.Group(BuildError!void).init(loop),
});
}
@@ -310,6 +322,9 @@ pub const Module = struct {
const decls = try Scope.Decls.create(self.a(), null);
errdefer decls.destroy();
+ var decl_group = event.Group(BuildError!void).init(self.loop);
+ errdefer decl_group.cancelAll();
+
var it = tree.root_node.decls.iterator(0);
while (it.next()) |decl_ptr| {
const decl = decl_ptr.*;
@@ -342,25 +357,30 @@ pub const Module = struct {
});
errdefer self.a().destroy(fn_decl);
- // TODO make this parallel
- try await try async self.addTopLevelDecl(tree, &fn_decl.base);
+ try decl_group.call(addTopLevelDecl, self, tree, &fn_decl.base);
},
ast.Node.Id.TestDecl => @panic("TODO"),
else => unreachable,
}
}
+ try await (async decl_group.wait() catch unreachable);
+ try await (async self.build_group.wait() catch unreachable);
}
async fn addTopLevelDecl(self: *Module, tree: *ast.Tree, decl: *Decl) !void {
const is_export = decl.isExported(tree);
- {
- const exported_symbol_names = await try async self.exported_symbol_names.acquire();
- defer exported_symbol_names.release();
+ if (is_export) {
+ try self.build_group.call(verifyUniqueSymbol, self, decl);
+ }
+ }
- if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
- @panic("TODO report compile error");
- }
+ async fn verifyUniqueSymbol(self: *Module, decl: *Decl) !void {
+ const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
+ defer exported_symbol_names.release();
+
+ if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
+ @panic("TODO report compile error");
}
}
diff --git a/std/event.zig b/std/event.zig
index 7e9928b3d7..516defebf8 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -3,6 +3,7 @@ pub const Loop = @import("event/loop.zig").Loop;
pub const Lock = @import("event/lock.zig").Lock;
pub const tcp = @import("event/tcp.zig");
pub const Channel = @import("event/channel.zig").Channel;
+pub const Group = @import("event/group.zig").Group;
test "import event tests" {
_ = @import("event/locked.zig");
@@ -10,4 +11,5 @@ test "import event tests" {
_ = @import("event/lock.zig");
_ = @import("event/tcp.zig");
_ = @import("event/channel.zig");
+ _ = @import("event/group.zig");
}
diff --git a/std/event/group.zig b/std/event/group.zig
new file mode 100644
index 0000000000..c286803b53
--- /dev/null
+++ b/std/event/group.zig
@@ -0,0 +1,158 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const Lock = std.event.Lock;
+const Loop = std.event.Loop;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const assert = std.debug.assert;
+
+/// ReturnType should be `void` or `E!void`
+pub fn Group(comptime ReturnType: type) type {
+ return struct {
+ coro_stack: Stack,
+ alloc_stack: Stack,
+ lock: Lock,
+
+ const Self = this;
+
+ const Error = switch (@typeInfo(ReturnType)) {
+ builtin.TypeId.ErrorUnion => |payload| payload.error_set,
+ else => void,
+ };
+ const Stack = std.atomic.Stack(promise->ReturnType);
+
+ pub fn init(loop: *Loop) Self {
+ return Self{
+ .coro_stack = Stack.init(),
+ .alloc_stack = Stack.init(),
+ .lock = Lock.init(loop),
+ };
+ }
+
+ /// Add a promise to the group. Thread-safe.
+ pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
+ const node = try self.lock.loop.allocator.create(Stack.Node{
+ .next = undefined,
+ .data = handle,
+ });
+ self.alloc_stack.push(node);
+ }
+
+ /// This is equivalent to an async call, but the async function is added to the group, instead
+ /// of returning a promise. func must be async and have return type void.
+ /// Thread-safe.
+ pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) {
+ const S = struct {
+ async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType {
+ // TODO this is a hack to make the memory following be inside the coro frame
+ suspend |p| {
+ var my_node: Stack.Node = undefined;
+ node.* = &my_node;
+ resume p;
+ }
+
+ // TODO this allocation elision should be guaranteed because we await it in
+ // this coro frame
+ return await (async func(args2) catch unreachable);
+ }
+ };
+ var node: *Stack.Node = undefined;
+ const handle = try async S.asyncFunc(&node, args);
+ node.* = Stack.Node{
+ .next = undefined,
+ .data = handle,
+ };
+ self.coro_stack.push(node);
+ }
+
+ /// Wait for all the calls and promises of the group to complete.
+ /// Thread-safe.
+ pub async fn wait(self: *Self) ReturnType {
+ // TODO catch unreachable because the allocation can be grouped with
+ // the coro frame allocation
+ const held = await (async self.lock.acquire() catch unreachable);
+ defer held.release();
+
+ while (self.coro_stack.pop()) |node| {
+ if (Error == void) {
+ await node.data;
+ } else {
+ (await node.data) catch |err| {
+ self.cancelAll();
+ return err;
+ };
+ }
+ }
+ while (self.alloc_stack.pop()) |node| {
+ const handle = node.data;
+ self.lock.loop.allocator.destroy(node);
+ if (Error == void) {
+ await handle;
+ } else {
+ (await handle) catch |err| {
+ self.cancelAll();
+ return err;
+ };
+ }
+ }
+ }
+
+ /// Cancel all the outstanding promises. May only be called if wait was never called.
+ pub fn cancelAll(self: *Self) void {
+ while (self.coro_stack.pop()) |node| {
+ cancel node.data;
+ }
+ while (self.alloc_stack.pop()) |node| {
+ cancel node.data;
+ self.lock.loop.allocator.destroy(node);
+ }
+ }
+ };
+}
+
+test "std.event.Group" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ const handle = try async testGroup(&loop);
+ defer cancel handle;
+
+ loop.run();
+}
+
+async fn testGroup(loop: *Loop) void {
+ var count: usize = 0;
+ var group = Group(void).init(loop);
+ group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory");
+ group.call(increaseByTen, &count) catch @panic("memory");
+ await (async group.wait() catch @panic("memory"));
+ assert(count == 11);
+
+ var another = Group(error!void).init(loop);
+ another.add(async somethingElse() catch @panic("memory")) catch @panic("memory");
+ another.call(doSomethingThatFails) catch @panic("memory");
+ std.debug.assertError(await (async another.wait() catch @panic("memory")), error.ItBroke);
+}
+
+async fn sleepALittle(count: *usize) void {
+ std.os.time.sleep(0, 1000000);
+ _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+}
+
+async fn increaseByTen(count: *usize) void {
+ var i: usize = 0;
+ while (i < 10) : (i += 1) {
+ _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+}
+
+async fn doSomethingThatFails() error!void {}
+async fn somethingElse() error!void {
+ return error.ItBroke;
+}
--
cgit v1.2.3
From 574e31f0a046aa6e6fad73fff2cbbb3617fe1bae Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 10 Jul 2018 20:18:43 -0400
Subject: self-hosted: first passing test
* introduce std.atomic.Int
* add src-self-hosted/test.zig which is tested by the main test suite
- it fully utilizes the multithreaded async/await event loop so the
tests should Go Fast
* `stage2/bin/zig build-obj test.zig` is able to spit out an error if 2 exported
functions collide
* ability for `zig test` to accept `--object` and `--assembly`
arguments
* std.build: TestStep supports addLibPath and addObjectFile
---
CMakeLists.txt | 1 +
build.zig | 152 ++++++++++++++++++++---------------
src-self-hosted/errmsg.zig | 18 +++--
src-self-hosted/introspect.zig | 5 ++
src-self-hosted/main.zig | 36 ++++-----
src-self-hosted/module.zig | 98 +++++++++++++++++++----
src-self-hosted/test.zig | 176 +++++++++++++++++++++++++++++++++++++++++
src/main.cpp | 10 ++-
std/atomic/index.zig | 2 +
std/atomic/int.zig | 19 +++++
std/build.zig | 22 ++++++
11 files changed, 432 insertions(+), 107 deletions(-)
create mode 100644 src-self-hosted/test.zig
create mode 100644 std/atomic/int.zig
(limited to 'src-self-hosted/module.zig')
diff --git a/CMakeLists.txt b/CMakeLists.txt
index eeb0ec2058..559b3b6964 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -431,6 +431,7 @@ set(ZIG_CPP_SOURCES
set(ZIG_STD_FILES
"array_list.zig"
"atomic/index.zig"
+ "atomic/int.zig"
"atomic/queue_mpmc.zig"
"atomic/queue_mpsc.zig"
"atomic/stack.zig"
diff --git a/build.zig b/build.zig
index fd154c7504..273048d458 100644
--- a/build.zig
+++ b/build.zig
@@ -35,70 +35,27 @@ pub fn build(b: *Builder) !void {
"BUILD_INFO",
});
var index: usize = 0;
- const cmake_binary_dir = nextValue(&index, build_info);
- const cxx_compiler = nextValue(&index, build_info);
- const llvm_config_exe = nextValue(&index, build_info);
- const lld_include_dir = nextValue(&index, build_info);
- const lld_libraries = nextValue(&index, build_info);
- const std_files = nextValue(&index, build_info);
- const c_header_files = nextValue(&index, build_info);
- const dia_guids_lib = nextValue(&index, build_info);
+ var ctx = Context{
+ .cmake_binary_dir = nextValue(&index, build_info),
+ .cxx_compiler = nextValue(&index, build_info),
+ .llvm_config_exe = nextValue(&index, build_info),
+ .lld_include_dir = nextValue(&index, build_info),
+ .lld_libraries = nextValue(&index, build_info),
+ .std_files = nextValue(&index, build_info),
+ .c_header_files = nextValue(&index, build_info),
+ .dia_guids_lib = nextValue(&index, build_info),
+ .llvm = undefined,
+ };
+ ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
- const llvm = findLLVM(b, llvm_config_exe) catch unreachable;
+ var test_stage2 = b.addTest("src-self-hosted/test.zig");
+ test_stage2.setBuildMode(builtin.Mode.Debug);
var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
exe.setBuildMode(mode);
- // This is for finding /lib/libz.a on alpine linux.
- // TODO turn this into -Dextra-lib-path=/lib option
- exe.addLibPath("/lib");
-
- exe.addIncludeDir("src");
- exe.addIncludeDir(cmake_binary_dir);
- addCppLib(b, exe, cmake_binary_dir, "zig_cpp");
- if (lld_include_dir.len != 0) {
- exe.addIncludeDir(lld_include_dir);
- var it = mem.split(lld_libraries, ";");
- while (it.next()) |lib| {
- exe.addObjectFile(lib);
- }
- } else {
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_wasm");
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_elf");
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_coff");
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_lib");
- }
- dependOnLib(exe, llvm);
-
- if (exe.target.getOs() == builtin.Os.linux) {
- const libstdcxx_path_padded = try b.exec([][]const u8{
- cxx_compiler,
- "-print-file-name=libstdc++.a",
- });
- const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?;
- if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
- warn(
- \\Unable to determine path to libstdc++.a
- \\On Fedora, install libstdc++-static and try again.
- \\
- );
- return error.RequiredLibraryNotFound;
- }
- exe.addObjectFile(libstdcxx_path);
-
- exe.linkSystemLibrary("pthread");
- } else if (exe.target.isDarwin()) {
- exe.linkSystemLibrary("c++");
- }
-
- if (dia_guids_lib.len != 0) {
- exe.addObjectFile(dia_guids_lib);
- }
-
- if (exe.target.getOs() != builtin.Os.windows) {
- exe.linkSystemLibrary("xml2");
- }
- exe.linkSystemLibrary("c");
+ try configureStage2(b, test_stage2, ctx);
+ try configureStage2(b, exe, ctx);
b.default_step.dependOn(&exe.step);
@@ -110,12 +67,16 @@ pub fn build(b: *Builder) !void {
exe.setVerboseLink(verbose_link_exe);
b.installArtifact(exe);
- installStdLib(b, std_files);
- installCHeaders(b, c_header_files);
+ installStdLib(b, ctx.std_files);
+ installCHeaders(b, ctx.c_header_files);
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") orelse false;
+ const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
+ test_stage2_step.dependOn(&test_stage2.step);
+ test_step.dependOn(test_stage2_step);
+
test_step.dependOn(docs_step);
test_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", with_lldb));
@@ -133,7 +94,7 @@ pub fn build(b: *Builder) !void {
test_step.dependOn(tests.addGenHTests(b, test_filter));
}
-fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) void {
+fn dependOnLib(lib_exe_obj: var, dep: *const LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
@@ -148,7 +109,7 @@ fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) vo
}
}
-fn addCppLib(b: *Builder, lib_exe_obj: *std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
+fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
@@ -254,3 +215,68 @@ fn nextValue(index: *usize, build_info: []const u8) []const u8 {
}
}
}
+
+fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
+ // This is for finding /lib/libz.a on alpine linux.
+ // TODO turn this into -Dextra-lib-path=/lib option
+ exe.addLibPath("/lib");
+
+ exe.addIncludeDir("src");
+ exe.addIncludeDir(ctx.cmake_binary_dir);
+ addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");
+ if (ctx.lld_include_dir.len != 0) {
+ exe.addIncludeDir(ctx.lld_include_dir);
+ var it = mem.split(ctx.lld_libraries, ";");
+ while (it.next()) |lib| {
+ exe.addObjectFile(lib);
+ }
+ } else {
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_wasm");
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_elf");
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_coff");
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_lib");
+ }
+ dependOnLib(exe, ctx.llvm);
+
+ if (exe.target.getOs() == builtin.Os.linux) {
+ const libstdcxx_path_padded = try b.exec([][]const u8{
+ ctx.cxx_compiler,
+ "-print-file-name=libstdc++.a",
+ });
+ const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?;
+ if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
+ warn(
+ \\Unable to determine path to libstdc++.a
+ \\On Fedora, install libstdc++-static and try again.
+ \\
+ );
+ return error.RequiredLibraryNotFound;
+ }
+ exe.addObjectFile(libstdcxx_path);
+
+ exe.linkSystemLibrary("pthread");
+ } else if (exe.target.isDarwin()) {
+ exe.linkSystemLibrary("c++");
+ }
+
+ if (ctx.dia_guids_lib.len != 0) {
+ exe.addObjectFile(ctx.dia_guids_lib);
+ }
+
+ if (exe.target.getOs() != builtin.Os.windows) {
+ exe.linkSystemLibrary("xml2");
+ }
+ exe.linkSystemLibrary("c");
+}
+
+const Context = struct {
+ cmake_binary_dir: []const u8,
+ cxx_compiler: []const u8,
+ llvm_config_exe: []const u8,
+ lld_include_dir: []const u8,
+ lld_libraries: []const u8,
+ std_files: []const u8,
+ c_header_files: []const u8,
+ dia_guids_lib: []const u8,
+ llvm: LibraryDep,
+};
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index b6fd78d8f6..a92b5145ce 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -11,11 +11,15 @@ pub const Color = enum {
On,
};
+pub const Span = struct {
+ first: ast.TokenIndex,
+ last: ast.TokenIndex,
+};
+
pub const Msg = struct {
path: []const u8,
text: []u8,
- first_token: TokenIndex,
- last_token: TokenIndex,
+ span: Span,
tree: *ast.Tree,
};
@@ -39,8 +43,10 @@ pub fn createFromParseError(
.tree = tree,
.path = path,
.text = text_buf.toOwnedSlice(),
- .first_token = loc_token,
- .last_token = loc_token,
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
});
errdefer allocator.destroy(msg);
@@ -48,8 +54,8 @@ pub fn createFromParseError(
}
pub fn printToStream(stream: var, msg: *const Msg, color_on: bool) !void {
- const first_token = msg.tree.tokens.at(msg.first_token);
- const last_token = msg.tree.tokens.at(msg.last_token);
+ const first_token = msg.tree.tokens.at(msg.span.first);
+ const last_token = msg.tree.tokens.at(msg.span.last);
const start_loc = msg.tree.tokenLocationPtr(0, first_token);
const end_loc = msg.tree.tokenLocationPtr(first_token.end, last_token);
if (!color_on) {
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 74084b48c6..ecd04c4467 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -53,3 +53,8 @@ pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return error.ZigLibDirNotFound;
};
}
+
+/// Caller must free result
+pub fn resolveZigCacheDir(allocator: *mem.Allocator) ![]u8 {
+ return std.mem.dupe(allocator, u8, "zig-cache");
+}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index fe94a4460a..d7ead0ba32 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -481,29 +481,29 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.link_out_file = flags.single("out-file");
try module.build();
- const process_build_events_handle = try async processBuildEvents(module, true);
+ const process_build_events_handle = try async processBuildEvents(module, color);
defer cancel process_build_events_handle;
loop.run();
}
-async fn processBuildEvents(module: *Module, watch: bool) void {
- while (watch) {
- // TODO directly awaiting async should guarantee memory allocation elision
- const build_event = await (async module.events.get() catch unreachable);
+async fn processBuildEvents(module: *Module, color: errmsg.Color) void {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_event = await (async module.events.get() catch unreachable);
- switch (build_event) {
- Module.Event.Ok => {
- std.debug.warn("Build succeeded\n");
- return;
- },
- Module.Event.Error => |err| {
- std.debug.warn("build failed: {}\n", @errorName(err));
- @panic("TODO error return trace");
- },
- Module.Event.Fail => |errs| {
- @panic("TODO print compile error messages");
- },
- }
+ switch (build_event) {
+ Module.Event.Ok => {
+ std.debug.warn("Build succeeded\n");
+ return;
+ },
+ Module.Event.Error => |err| {
+ std.debug.warn("build failed: {}\n", @errorName(err));
+ @panic("TODO error return trace");
+ },
+ Module.Event.Fail => |msgs| {
+ for (msgs) |msg| {
+ errmsg.printToFile(&stderr_file, msg, color) catch os.exit(1);
+ }
+ },
}
}
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 24be228eb8..44954e4cd1 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -89,12 +89,9 @@ pub const Module = struct {
/// the build is complete.
build_group: event.Group(BuildError!void),
- const BuildErrorsList = std.SegmentedList(BuildErrorDesc, 1);
+ compile_errors: event.Locked(CompileErrList),
- pub const BuildErrorDesc = struct {
- code: BuildError,
- text: []const u8,
- };
+ const CompileErrList = std.ArrayList(*errmsg.Msg);
// TODO handle some of these earlier and report them in a way other than error codes
pub const BuildError = error{
@@ -131,11 +128,12 @@ pub const Module = struct {
NoStdHandles,
Overflow,
NotSupported,
+ BufferTooSmall,
};
pub const Event = union(enum) {
Ok,
- Fail: []errmsg.Msg,
+ Fail: []*errmsg.Msg,
Error: BuildError,
};
@@ -249,6 +247,7 @@ pub const Module = struct {
.link_out_file = null,
.exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
.build_group = event.Group(BuildError!void).init(loop),
+ .compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
});
}
@@ -288,7 +287,17 @@ pub const Module = struct {
await (async self.events.put(Event{ .Error = err }) catch unreachable);
return;
};
- await (async self.events.put(Event.Ok) catch unreachable);
+ const compile_errors = blk: {
+ const held = await (async self.compile_errors.acquire() catch unreachable);
+ defer held.release();
+ break :blk held.value.toOwnedSlice();
+ };
+
+ if (compile_errors.len == 0) {
+ await (async self.events.put(Event.Ok) catch unreachable);
+ } else {
+ await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
+ }
// for now we stop after 1
return;
}
@@ -310,10 +319,13 @@ pub const Module = struct {
};
errdefer self.a().free(source_code);
- var parsed_file = ParsedFile{
- .tree = try std.zig.parse(self.a(), source_code),
+ const parsed_file = try self.a().create(ParsedFile{
+ .tree = undefined,
.realpath = root_src_real_path,
- };
+ });
+ errdefer self.a().destroy(parsed_file);
+
+ parsed_file.tree = try std.zig.parse(self.a(), source_code);
errdefer parsed_file.tree.deinit();
const tree = &parsed_file.tree;
@@ -337,7 +349,7 @@ pub const Module = struct {
const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
@panic("TODO add compile error");
//try self.addCompileError(
- // &parsed_file,
+ // parsed_file,
// fn_proto.fn_token,
// fn_proto.fn_token + 1,
// "missing function name",
@@ -357,7 +369,7 @@ pub const Module = struct {
});
errdefer self.a().destroy(fn_decl);
- try decl_group.call(addTopLevelDecl, self, tree, &fn_decl.base);
+ try decl_group.call(addTopLevelDecl, self, parsed_file, &fn_decl.base);
},
ast.Node.Id.TestDecl => @panic("TODO"),
else => unreachable,
@@ -367,20 +379,56 @@ pub const Module = struct {
try await (async self.build_group.wait() catch unreachable);
}
- async fn addTopLevelDecl(self: *Module, tree: *ast.Tree, decl: *Decl) !void {
- const is_export = decl.isExported(tree);
+ async fn addTopLevelDecl(self: *Module, parsed_file: *ParsedFile, decl: *Decl) !void {
+ const is_export = decl.isExported(&parsed_file.tree);
if (is_export) {
- try self.build_group.call(verifyUniqueSymbol, self, decl);
+ try self.build_group.call(verifyUniqueSymbol, self, parsed_file, decl);
}
}
- async fn verifyUniqueSymbol(self: *Module, decl: *Decl) !void {
+ fn addCompileError(self: *Module, parsed_file: *ParsedFile, span: errmsg.Span, comptime fmt: []const u8, args: ...) !void {
+ const text = try std.fmt.allocPrint(self.loop.allocator, fmt, args);
+ errdefer self.loop.allocator.free(text);
+
+ try self.build_group.call(addCompileErrorAsync, self, parsed_file, span.first, span.last, text);
+ }
+
+ async fn addCompileErrorAsync(
+ self: *Module,
+ parsed_file: *ParsedFile,
+ first_token: ast.TokenIndex,
+ last_token: ast.TokenIndex,
+ text: []u8,
+ ) !void {
+ const msg = try self.loop.allocator.create(errmsg.Msg{
+ .path = parsed_file.realpath,
+ .text = text,
+ .span = errmsg.Span{
+ .first = first_token,
+ .last = last_token,
+ },
+ .tree = &parsed_file.tree,
+ });
+ errdefer self.loop.allocator.destroy(msg);
+
+ const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
+ defer compile_errors.release();
+
+ try compile_errors.value.append(msg);
+ }
+
+ async fn verifyUniqueSymbol(self: *Module, parsed_file: *ParsedFile, decl: *Decl) !void {
const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
defer exported_symbol_names.release();
if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
- @panic("TODO report compile error");
+ try self.addCompileError(
+ parsed_file,
+ decl.getSpan(),
+ "exported symbol collision: '{}'",
+ decl.name,
+ );
}
}
@@ -503,6 +551,22 @@ pub const Decl = struct {
}
}
+ pub fn getSpan(base: *const Decl) errmsg.Span {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ const fn_proto = fn_decl.fn_proto;
+ const start = fn_proto.fn_token;
+ const end = fn_proto.name_token orelse start;
+ return errmsg.Span{
+ .first = start,
+ .last = end + 1,
+ };
+ },
+ else => @panic("TODO"),
+ }
+ }
+
pub const Resolution = enum {
Unresolved,
InProgress,
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
new file mode 100644
index 0000000000..7ce7cf6ee3
--- /dev/null
+++ b/src-self-hosted/test.zig
@@ -0,0 +1,176 @@
+const std = @import("std");
+const mem = std.mem;
+const builtin = @import("builtin");
+const Target = @import("target.zig").Target;
+const Module = @import("module.zig").Module;
+const introspect = @import("introspect.zig");
+const assertOrPanic = std.debug.assertOrPanic;
+const errmsg = @import("errmsg.zig");
+
+test "compile errors" {
+ var ctx: TestContext = undefined;
+ try ctx.init();
+ defer ctx.deinit();
+
+ try ctx.testCompileError(
+ \\export fn entry() void {}
+ \\export fn entry() void {}
+ , file1, 2, 8, "exported symbol collision: 'entry'");
+
+ try ctx.run();
+}
+
+const file1 = "1.zig";
+
+const TestContext = struct {
+ loop: std.event.Loop,
+ zig_lib_dir: []u8,
+ direct_allocator: std.heap.DirectAllocator,
+ arena: std.heap.ArenaAllocator,
+ zig_cache_dir: []u8,
+ file_index: std.atomic.Int(usize),
+ group: std.event.Group(error!void),
+ any_err: error!void,
+
+ const tmp_dir_name = "stage2_test_tmp";
+
+ fn init(self: *TestContext) !void {
+ self.* = TestContext{
+ .any_err = {},
+ .direct_allocator = undefined,
+ .arena = undefined,
+ .loop = undefined,
+ .zig_lib_dir = undefined,
+ .zig_cache_dir = undefined,
+ .group = undefined,
+ .file_index = std.atomic.Int(usize).init(0),
+ };
+
+ self.direct_allocator = std.heap.DirectAllocator.init();
+ errdefer self.direct_allocator.deinit();
+
+ self.arena = std.heap.ArenaAllocator.init(&self.direct_allocator.allocator);
+ errdefer self.arena.deinit();
+
+ // TODO faster allocator for coroutines that is thread-safe/lock-free
+ try self.loop.initMultiThreaded(&self.direct_allocator.allocator);
+ errdefer self.loop.deinit();
+
+ self.group = std.event.Group(error!void).init(&self.loop);
+ errdefer self.group.cancelAll();
+
+ self.zig_lib_dir = try introspect.resolveZigLibDir(&self.arena.allocator);
+ errdefer self.arena.allocator.free(self.zig_lib_dir);
+
+ self.zig_cache_dir = try introspect.resolveZigCacheDir(&self.arena.allocator);
+ errdefer self.arena.allocator.free(self.zig_cache_dir);
+
+ try std.os.makePath(&self.arena.allocator, tmp_dir_name);
+ errdefer std.os.deleteTree(&self.arena.allocator, tmp_dir_name) catch {};
+ }
+
+ fn deinit(self: *TestContext) void {
+ std.os.deleteTree(&self.arena.allocator, tmp_dir_name) catch {};
+ self.arena.allocator.free(self.zig_cache_dir);
+ self.arena.allocator.free(self.zig_lib_dir);
+ self.loop.deinit();
+ self.arena.deinit();
+ self.direct_allocator.deinit();
+ }
+
+ fn run(self: *TestContext) !void {
+ const handle = try self.loop.call(waitForGroup, self);
+ defer cancel handle;
+ self.loop.run();
+ return self.any_err;
+ }
+
+ async fn waitForGroup(self: *TestContext) void {
+ self.any_err = await (async self.group.wait() catch unreachable);
+ }
+
+ fn testCompileError(
+ self: *TestContext,
+ source: []const u8,
+ path: []const u8,
+ line: usize,
+ column: usize,
+ msg: []const u8,
+ ) !void {
+ var file_index_buf: [20]u8 = undefined;
+ const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.next());
+ const file1_path = try std.os.path.join(&self.arena.allocator, tmp_dir_name, file_index, file1);
+
+ if (std.os.path.dirname(file1_path)) |dirname| {
+ try std.os.makePath(&self.arena.allocator, dirname);
+ }
+
+ // TODO async I/O
+ try std.io.writeFile(&self.arena.allocator, file1_path, source);
+
+ var module = try Module.create(
+ &self.loop,
+ "test",
+ file1_path,
+ Target.Native,
+ Module.Kind.Obj,
+ builtin.Mode.Debug,
+ self.zig_lib_dir,
+ self.zig_cache_dir,
+ );
+ errdefer module.destroy();
+
+ try module.build();
+
+ try self.group.call(getModuleEvent, module, source, path, line, column, msg);
+ }
+
+ async fn getModuleEvent(
+ module: *Module,
+ source: []const u8,
+ path: []const u8,
+ line: usize,
+ column: usize,
+ text: []const u8,
+ ) !void {
+ defer module.destroy();
+ const build_event = await (async module.events.get() catch unreachable);
+
+ switch (build_event) {
+ Module.Event.Ok => {
+ @panic("build incorrectly succeeded");
+ },
+ Module.Event.Error => |err| {
+ @panic("build incorrectly failed");
+ },
+ Module.Event.Fail => |msgs| {
+ assertOrPanic(msgs.len != 0);
+ for (msgs) |msg| {
+ if (mem.endsWith(u8, msg.path, path) and mem.eql(u8, msg.text, text)) {
+ const first_token = msg.tree.tokens.at(msg.span.first);
+ const last_token = msg.tree.tokens.at(msg.span.last);
+ const start_loc = msg.tree.tokenLocationPtr(0, first_token);
+ if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
+ return;
+ }
+ }
+ }
+ std.debug.warn(
+ "\n=====source:=======\n{}\n====expected:========\n{}:{}:{}: error: {}\n",
+ source,
+ path,
+ line,
+ column,
+ text,
+ );
+ std.debug.warn("\n====found:========\n");
+ var stderr = try std.io.getStdErr();
+ for (msgs) |msg| {
+ try errmsg.printToFile(&stderr, msg, errmsg.Color.Auto);
+ }
+ std.debug.warn("============\n");
+ return error.TestFailed;
+ },
+ }
+ }
+};
diff --git a/src/main.cpp b/src/main.cpp
index a409778a78..5f96953f21 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -891,15 +891,19 @@ int main(int argc, char **argv) {
add_package(g, cur_pkg, g->root_package);
- if (cmd == CmdBuild || cmd == CmdRun) {
- codegen_set_emit_file_type(g, emit_file_type);
-
+ if (cmd == CmdBuild || cmd == CmdRun || cmd == CmdTest) {
for (size_t i = 0; i < objects.length; i += 1) {
codegen_add_object(g, buf_create_from_str(objects.at(i)));
}
for (size_t i = 0; i < asm_files.length; i += 1) {
codegen_add_assembly(g, buf_create_from_str(asm_files.at(i)));
}
+ }
+
+
+ if (cmd == CmdBuild || cmd == CmdRun) {
+ codegen_set_emit_file_type(g, emit_file_type);
+
codegen_build(g);
codegen_link(g, out_file);
if (timing_info)
diff --git a/std/atomic/index.zig b/std/atomic/index.zig
index c0ea5be183..cf344a8231 100644
--- a/std/atomic/index.zig
+++ b/std/atomic/index.zig
@@ -1,9 +1,11 @@
pub const Stack = @import("stack.zig").Stack;
pub const QueueMpsc = @import("queue_mpsc.zig").QueueMpsc;
pub const QueueMpmc = @import("queue_mpmc.zig").QueueMpmc;
+pub const Int = @import("int.zig").Int;
test "std.atomic" {
_ = @import("stack.zig");
_ = @import("queue_mpsc.zig");
_ = @import("queue_mpmc.zig");
+ _ = @import("int.zig");
}
diff --git a/std/atomic/int.zig b/std/atomic/int.zig
new file mode 100644
index 0000000000..7042bca78d
--- /dev/null
+++ b/std/atomic/int.zig
@@ -0,0 +1,19 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+
+/// Thread-safe, lock-free integer
+pub fn Int(comptime T: type) type {
+ return struct {
+ value: T,
+
+ pub const Self = this;
+
+ pub fn init(init_val: T) Self {
+ return Self{ .value = init_val };
+ }
+
+ pub fn next(self: *Self) T {
+ return @atomicRmw(T, &self.value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+ };
+}
diff --git a/std/build.zig b/std/build.zig
index 24fa85383a..cea760e8a2 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -1596,6 +1596,8 @@ pub const TestStep = struct {
target: Target,
exec_cmd_args: ?[]const ?[]const u8,
include_dirs: ArrayList([]const u8),
+ lib_paths: ArrayList([]const u8),
+ object_files: ArrayList([]const u8),
pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
@@ -1611,9 +1613,15 @@ pub const TestStep = struct {
.target = Target{ .Native = {} },
.exec_cmd_args = null,
.include_dirs = ArrayList([]const u8).init(builder.allocator),
+ .lib_paths = ArrayList([]const u8).init(builder.allocator),
+ .object_files = ArrayList([]const u8).init(builder.allocator),
};
}
+ pub fn addLibPath(self: *TestStep, path: []const u8) void {
+ self.lib_paths.append(path) catch unreachable;
+ }
+
pub fn setVerbose(self: *TestStep, value: bool) void {
self.verbose = value;
}
@@ -1638,6 +1646,10 @@ pub const TestStep = struct {
self.filter = text;
}
+ pub fn addObjectFile(self: *TestStep, path: []const u8) void {
+ self.object_files.append(path) catch unreachable;
+ }
+
pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
@@ -1699,6 +1711,11 @@ pub const TestStep = struct {
try zig_args.append(self.name_prefix);
}
+ for (self.object_files.toSliceConst()) |object_file| {
+ try zig_args.append("--object");
+ try zig_args.append(builder.pathFromRoot(object_file));
+ }
+
{
var it = self.link_libs.iterator();
while (true) {
@@ -1734,6 +1751,11 @@ pub const TestStep = struct {
try zig_args.append(rpath);
}
+ for (self.lib_paths.toSliceConst()) |lib_path| {
+ try zig_args.append("--library-path");
+ try zig_args.append(lib_path);
+ }
+
for (builder.lib_paths.toSliceConst()) |lib_path| {
try zig_args.append("--library-path");
try zig_args.append(lib_path);
--
cgit v1.2.3
From c6c49389ebd1503de38d9bb6ff6d9f6fba94d63b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 11 Jul 2018 01:26:46 -0400
Subject: self-hosted: add compile error test for missing fn name
---
src-self-hosted/module.zig | 11 ++++-------
src-self-hosted/test.zig | 4 ++++
2 files changed, 8 insertions(+), 7 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 44954e4cd1..4b0c44529b 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -347,13 +347,10 @@ pub const Module = struct {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
- @panic("TODO add compile error");
- //try self.addCompileError(
- // parsed_file,
- // fn_proto.fn_token,
- // fn_proto.fn_token + 1,
- // "missing function name",
- //);
+ try self.addCompileError(parsed_file, errmsg.Span{
+ .first = fn_proto.fn_token,
+ .last = fn_proto.fn_token + 1,
+ }, "missing function name");
continue;
};
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 01a857f21d..ffad7f1b8d 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -17,6 +17,10 @@ test "compile errors" {
\\export fn entry() void {}
, file1, 2, 8, "exported symbol collision: 'entry'");
+ try ctx.testCompileError(
+ \\fn() void {}
+ , file1, 1, 1, "missing function name");
+
try ctx.run();
}
--
cgit v1.2.3
From 9bdcd2a495d4189d6536d43f1294dffb38daa9a5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 11 Jul 2018 15:58:48 -0400
Subject: add std.event.Future
This is like a promise, but it's for multiple getters, and
uses an event loop.
---
CMakeLists.txt | 1 +
src-self-hosted/module.zig | 19 ++++++++-
src-self-hosted/test.zig | 11 +-----
std/event.zig | 2 +
std/event/future.zig | 87 ++++++++++++++++++++++++++++++++++++++++++
std/event/lock.zig | 11 +++++-
test/stage2/compile_errors.zig | 12 ++++++
7 files changed, 132 insertions(+), 11 deletions(-)
create mode 100644 std/event/future.zig
create mode 100644 test/stage2/compile_errors.zig
(limited to 'src-self-hosted/module.zig')
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 559b3b6964..51d348f042 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -460,6 +460,7 @@ set(ZIG_STD_FILES
"empty.zig"
"event.zig"
"event/channel.zig"
+ "event/future.zig"
"event/group.zig"
"event/lock.zig"
"event/locked.zig"
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 4b0c44529b..5cde12f65c 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -381,6 +381,7 @@ pub const Module = struct {
if (is_export) {
try self.build_group.call(verifyUniqueSymbol, self, parsed_file, decl);
+ try self.build_group.call(generateDecl, self, parsed_file, decl);
}
}
@@ -429,6 +430,22 @@ pub const Module = struct {
}
}
+ /// This declaration has been blessed as going into the final code generation.
+ async fn generateDecl(self: *Module, parsed_file: *ParsedFile, decl: *Decl) void {
+ switch (decl.id) {
+ Decl.Id.Var => @panic("TODO"),
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
+ return await (async self.generateDeclFn(parsed_file, fn_decl) catch unreachable);
+ },
+ Decl.Id.CompTime => @panic("TODO"),
+ }
+ }
+
+ async fn generateDeclFn(self: *Module, parsed_file: *ParsedFile, fn_decl: *Decl.Fn) void {
+ fn_decl.value = Decl.Fn.Val{ .Ok = Value.Fn{} };
+ }
+
pub fn link(self: *Module, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
@@ -589,7 +606,7 @@ pub const Decl = struct {
// TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
pub const Val = union {
Unresolved: void,
- Ok: *Value.Fn,
+ Ok: Value.Fn,
};
pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index ffad7f1b8d..4455352f95 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -12,14 +12,7 @@ test "compile errors" {
try ctx.init();
defer ctx.deinit();
- try ctx.testCompileError(
- \\export fn entry() void {}
- \\export fn entry() void {}
- , file1, 2, 8, "exported symbol collision: 'entry'");
-
- try ctx.testCompileError(
- \\fn() void {}
- , file1, 1, 1, "missing function name");
+ try @import("../test/stage2/compile_errors.zig").addCases(&ctx);
try ctx.run();
}
@@ -27,7 +20,7 @@ test "compile errors" {
const file1 = "1.zig";
const allocator = std.heap.c_allocator;
-const TestContext = struct {
+pub const TestContext = struct {
loop: std.event.Loop,
zig_lib_dir: []u8,
zig_cache_dir: []u8,
diff --git a/std/event.zig b/std/event.zig
index 516defebf8..f3913a432b 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -4,6 +4,7 @@ pub const Lock = @import("event/lock.zig").Lock;
pub const tcp = @import("event/tcp.zig");
pub const Channel = @import("event/channel.zig").Channel;
pub const Group = @import("event/group.zig").Group;
+pub const Future = @import("event/future.zig").Future;
test "import event tests" {
_ = @import("event/locked.zig");
@@ -12,4 +13,5 @@ test "import event tests" {
_ = @import("event/tcp.zig");
_ = @import("event/channel.zig");
_ = @import("event/group.zig");
+ _ = @import("event/future.zig");
}
diff --git a/std/event/future.zig b/std/event/future.zig
new file mode 100644
index 0000000000..8001f675a2
--- /dev/null
+++ b/std/event/future.zig
@@ -0,0 +1,87 @@
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Lock = std.event.Lock;
+const Loop = std.event.Loop;
+
+/// This is a value that starts out unavailable, until a value is put().
+/// While it is unavailable, coroutines suspend when they try to get() it,
+/// and then are resumed when the value is put().
+/// At this point the value remains forever available, and another put() is not allowed.
+pub fn Future(comptime T: type) type {
+ return struct {
+ lock: Lock,
+ data: T,
+ available: u8, // TODO make this a bool
+
+ const Self = this;
+ const Queue = std.atomic.QueueMpsc(promise);
+
+ pub fn init(loop: *Loop) Self {
+ return Self{
+ .lock = Lock.initLocked(loop),
+ .available = 0,
+ .data = undefined,
+ };
+ }
+
+ /// Obtain the value. If it's not available, wait until it becomes
+ /// available.
+ /// Thread-safe.
+ pub async fn get(self: *Self) T {
+ if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) {
+ return self.data;
+ }
+ const held = await (async self.lock.acquire() catch unreachable);
+ defer held.release();
+
+ return self.data;
+ }
+
+ /// Make the data become available. May be called only once.
+ pub fn put(self: *Self, value: T) void {
+ self.data = value;
+ const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ assert(prev == 0); // put() called twice
+ Lock.Held.release(Lock.Held{ .lock = &self.lock });
+ }
+ };
+}
+
+test "std.event.Future" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ const handle = try async testFuture(&loop);
+ defer cancel handle;
+
+ loop.run();
+}
+
+async fn testFuture(loop: *Loop) void {
+ var future = Future(i32).init(loop);
+
+ const a = async waitOnFuture(&future) catch @panic("memory");
+ const b = async waitOnFuture(&future) catch @panic("memory");
+ const c = async resolveFuture(&future) catch @panic("memory");
+
+ const result = (await a) + (await b);
+ cancel c;
+ assert(result == 12);
+}
+
+async fn waitOnFuture(future: *Future(i32)) i32 {
+ return await (async future.get() catch @panic("memory"));
+}
+
+async fn resolveFuture(future: *Future(i32)) void {
+ future.put(6);
+}
diff --git a/std/event/lock.zig b/std/event/lock.zig
index 2a8d5ada77..cba3594b50 100644
--- a/std/event/lock.zig
+++ b/std/event/lock.zig
@@ -73,6 +73,15 @@ pub const Lock = struct {
};
}
+ pub fn initLocked(loop: *Loop) Lock {
+ return Lock{
+ .loop = loop,
+ .shared_bit = 1,
+ .queue = Queue.init(),
+ .queue_empty_bit = 1,
+ };
+ }
+
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
@@ -81,7 +90,7 @@ pub const Lock = struct {
}
pub async fn acquire(self: *Lock) Held {
- s: suspend |handle| {
+ suspend |handle| {
// TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
.data = handle,
diff --git a/test/stage2/compile_errors.zig b/test/stage2/compile_errors.zig
new file mode 100644
index 0000000000..1dca908e69
--- /dev/null
+++ b/test/stage2/compile_errors.zig
@@ -0,0 +1,12 @@
+const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
+
+pub fn addCases(ctx: *TestContext) !void {
+ try ctx.testCompileError(
+ \\export fn entry() void {}
+ \\export fn entry() void {}
+ , "1.zig", 2, 8, "exported symbol collision: 'entry'");
+
+ try ctx.testCompileError(
+ \\fn() void {}
+ , "1.zig", 1, 1, "missing function name");
+}
--
cgit v1.2.3
From 687bd92f9c3d9f521c8fe5884627ef1b00320364 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 12 Jul 2018 15:08:40 -0400
Subject: self-hosted: generate zig IR for simple function
no tests for this yet. I think the quickest path to testing will be
creating the .o files and linking with libc, executing, and then
comparing output.
---
src-self-hosted/decl.zig | 96 ++++++
src-self-hosted/ir.zig | 745 ++++++++++++++++++++++++++++++++++------
src-self-hosted/module.zig | 383 ++++++++++-----------
src-self-hosted/parsed_file.zig | 6 +
src-self-hosted/scope.zig | 230 ++++++++++++-
src-self-hosted/type.zig | 268 +++++++++++++++
src-self-hosted/value.zig | 125 +++++++
src-self-hosted/visib.zig | 4 +
std/event/future.zig | 2 +-
std/zig/ast.zig | 6 -
std/zig/parse.zig | 5 -
11 files changed, 1555 insertions(+), 315 deletions(-)
create mode 100644 src-self-hosted/decl.zig
create mode 100644 src-self-hosted/parsed_file.zig
create mode 100644 src-self-hosted/type.zig
create mode 100644 src-self-hosted/value.zig
create mode 100644 src-self-hosted/visib.zig
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/decl.zig b/src-self-hosted/decl.zig
new file mode 100644
index 0000000000..1a75a3249e
--- /dev/null
+++ b/src-self-hosted/decl.zig
@@ -0,0 +1,96 @@
+const std = @import("std");
+const Allocator = mem.Allocator;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Visib = @import("visib.zig").Visib;
+const ParsedFile = @import("parsed_file.zig").ParsedFile;
+const event = std.event;
+const Value = @import("value.zig").Value;
+const Token = std.zig.Token;
+const errmsg = @import("errmsg.zig");
+const Scope = @import("scope.zig").Scope;
+const Module = @import("module.zig").Module;
+
+pub const Decl = struct {
+ id: Id,
+ name: []const u8,
+ visib: Visib,
+ resolution: event.Future(Module.BuildError!void),
+ resolution_in_progress: u8,
+ parsed_file: *ParsedFile,
+ parent_scope: *Scope,
+
+ pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ return fn_decl.isExported(tree);
+ },
+ else => return false,
+ }
+ }
+
+ pub fn getSpan(base: *const Decl) errmsg.Span {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ const fn_proto = fn_decl.fn_proto;
+ const start = fn_proto.fn_token;
+ const end = fn_proto.name_token orelse start;
+ return errmsg.Span{
+ .first = start,
+ .last = end + 1,
+ };
+ },
+ else => @panic("TODO"),
+ }
+ }
+
+ pub const Id = enum {
+ Var,
+ Fn,
+ CompTime,
+ };
+
+ pub const Var = struct {
+ base: Decl,
+ };
+
+ pub const Fn = struct {
+ base: Decl,
+ value: Val,
+ fn_proto: *const ast.Node.FnProto,
+
+ // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
+ pub const Val = union {
+ Unresolved: void,
+ Ok: *Value.Fn,
+ };
+
+ pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
+ return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
+ const token = tree.tokens.at(tok_index);
+ break :x switch (token.id) {
+ Token.Id.Extern => tree.tokenSlicePtr(token),
+ else => null,
+ };
+ } else null;
+ }
+
+ pub fn isExported(self: Fn, tree: *ast.Tree) bool {
+ if (self.fn_proto.extern_export_inline_token) |tok_index| {
+ const token = tree.tokens.at(tok_index);
+ return token.id == Token.Id.Keyword_export;
+ } else {
+ return false;
+ }
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Decl,
+ };
+};
+
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 3334d9511b..f517dfe579 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -1,111 +1,656 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Module = @import("module.zig").Module;
const Scope = @import("scope.zig").Scope;
+const ast = std.zig.ast;
+const Allocator = std.mem.Allocator;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
+const assert = std.debug.assert;
+const Token = std.zig.Token;
+const ParsedFile = @import("parsed_file.zig").ParsedFile;
+
+pub const LVal = enum {
+ None,
+ Ptr,
+};
+
+pub const Mut = enum {
+ Mut,
+ Const,
+};
+
+pub const Volatility = enum {
+ NonVolatile,
+ Volatile,
+};
+
+pub const IrVal = union(enum) {
+ Unknown,
+ Known: *Value,
+
+ pub fn dump(self: IrVal) void {
+ switch (self) {
+ IrVal.Unknown => std.debug.warn("Unknown"),
+ IrVal.Known => |value| {
+ std.debug.warn("Known(");
+ value.dump();
+ std.debug.warn(")");
+ },
+ }
+ }
+};
pub const Instruction = struct {
id: Id,
scope: *Scope,
+ debug_id: usize,
+ val: IrVal,
+
+ /// true if this instruction was generated by zig and not from user code
+ is_generated: bool,
+
+ pub fn cast(base: *Instruction, comptime T: type) ?*T {
+ if (base.id == comptime typeToId(T)) {
+ return @fieldParentPtr(T, "base", base);
+ }
+ return null;
+ }
+
+ pub fn typeToId(comptime T: type) Id {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (T == @field(Instruction, @memberName(Id, i))) {
+ return @field(Id, @memberName(Id, i));
+ }
+ }
+ unreachable;
+ }
+
+ pub fn dump(base: *const Instruction) void {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Instruction, @memberName(Id, i));
+ std.debug.warn("#{} = {}(", base.debug_id, @tagName(base.id));
+ @fieldParentPtr(T, "base", base).dump();
+ std.debug.warn(")");
+ return;
+ }
+ }
+ unreachable;
+ }
+
+ pub fn setGenerated(base: *Instruction) void {
+ base.is_generated = true;
+ }
+
+ pub fn isNoReturn(base: *const Instruction) bool {
+ switch (base.val) {
+ IrVal.Unknown => return false,
+ IrVal.Known => |x| return x.typeof.id == Type.Id.NoReturn,
+ }
+ }
pub const Id = enum {
- Br,
- CondBr,
- SwitchBr,
- SwitchVar,
- SwitchTarget,
- Phi,
- UnOp,
- BinOp,
- DeclVar,
- LoadPtr,
- StorePtr,
- FieldPtr,
- StructFieldPtr,
- UnionFieldPtr,
- ElemPtr,
- VarPtr,
- Call,
- Const,
Return,
- Cast,
- ContainerInitList,
- ContainerInitFields,
- StructInit,
- UnionInit,
- Unreachable,
- TypeOf,
- ToPtrType,
- PtrTypeChild,
- SetRuntimeSafety,
- SetFloatMode,
- ArrayType,
- SliceType,
- Asm,
- SizeOf,
- TestNonNull,
- UnwrapMaybe,
- MaybeWrap,
- UnionTag,
- Clz,
- Ctz,
- Import,
- CImport,
- CInclude,
- CDefine,
- CUndef,
- ArrayLen,
+ Const,
Ref,
- MinValue,
- MaxValue,
- CompileErr,
- CompileLog,
- ErrName,
- EmbedFile,
- Cmpxchg,
- Fence,
- Truncate,
- IntType,
- BoolNot,
- Memset,
- Memcpy,
- Slice,
- MemberCount,
- MemberType,
- MemberName,
- Breakpoint,
- ReturnAddress,
- FrameAddress,
- AlignOf,
- OverflowOp,
- TestErr,
- UnwrapErrCode,
- UnwrapErrPayload,
- ErrWrapCode,
- ErrWrapPayload,
- FnProto,
- TestComptime,
- PtrCast,
- BitCast,
- WidenOrShorten,
- IntToPtr,
- PtrToInt,
- IntToEnum,
- IntToErr,
- ErrToInt,
- CheckSwitchProngs,
- CheckStatementIsVoid,
- TypeName,
- CanImplicitCast,
- DeclRef,
- Panic,
- TagName,
- TagType,
- FieldParentPtr,
- OffsetOf,
- TypeId,
- SetEvalBranchQuota,
- PtrTypeOf,
- AlignCast,
- OpaqueType,
- SetAlignStack,
- ArgType,
- Export,
+ DeclVar,
+ CheckVoidStmt,
+ Phi,
+ Br,
+ };
+
+ pub const Const = struct {
+ base: Instruction,
+
+ pub fn buildBool(irb: *Builder, scope: *Scope, val: bool) !*Instruction {
+ const inst = try irb.arena().create(Const{
+ .base = Instruction{
+ .id = Instruction.Id.Const,
+ .is_generated = false,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal{ .Known = &Value.Bool.get(irb.module, val).base },
+ },
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn buildVoid(irb: *Builder, scope: *Scope, is_generated: bool) !*Instruction {
+ const inst = try irb.arena().create(Const{
+ .base = Instruction{
+ .id = Instruction.Id.Const,
+ .is_generated = is_generated,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal{ .Known = &Value.Void.get(irb.module).base },
+ },
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn dump(inst: *const Const) void {
+ inst.base.val.Known.dump();
+ }
+ };
+
+ pub const Return = struct {
+ base: Instruction,
+ return_value: *Instruction,
+
+ pub fn build(irb: *Builder, scope: *Scope, return_value: *Instruction) !*Instruction {
+ const inst = try irb.arena().create(Return{
+ .base = Instruction{
+ .id = Instruction.Id.Return,
+ .is_generated = false,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal{ .Known = &Value.Void.get(irb.module).base },
+ },
+ .return_value = return_value,
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn dump(inst: *const Return) void {
+ std.debug.warn("#{}", inst.return_value.debug_id);
+ }
+ };
+
+ pub const Ref = struct {
+ base: Instruction,
+ target: *Instruction,
+ mut: Mut,
+ volatility: Volatility,
+
+ pub fn build(
+ irb: *Builder,
+ scope: *Scope,
+ target: *Instruction,
+ mut: Mut,
+ volatility: Volatility,
+ ) !*Instruction {
+ const inst = try irb.arena().create(Ref{
+ .base = Instruction{
+ .id = Instruction.Id.Ref,
+ .is_generated = false,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal.Unknown,
+ },
+ .target = target,
+ .mut = mut,
+ .volatility = volatility,
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn dump(inst: *const Ref) void {}
+ };
+
+ pub const DeclVar = struct {
+ base: Instruction,
+ variable: *Variable,
+
+ pub fn dump(inst: *const DeclVar) void {}
+ };
+
+ pub const CheckVoidStmt = struct {
+ base: Instruction,
+ target: *Instruction,
+
+ pub fn build(
+ irb: *Builder,
+ scope: *Scope,
+ target: *Instruction,
+ ) !*Instruction {
+ const inst = try irb.arena().create(CheckVoidStmt{
+ .base = Instruction{
+ .id = Instruction.Id.CheckVoidStmt,
+ .is_generated = true,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal{ .Known = &Value.Void.get(irb.module).base },
+ },
+ .target = target,
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn dump(inst: *const CheckVoidStmt) void {}
+ };
+
+ pub const Phi = struct {
+ base: Instruction,
+ incoming_blocks: []*BasicBlock,
+ incoming_values: []*Instruction,
+
+ pub fn build(
+ irb: *Builder,
+ scope: *Scope,
+ incoming_blocks: []*BasicBlock,
+ incoming_values: []*Instruction,
+ ) !*Instruction {
+ const inst = try irb.arena().create(Phi{
+ .base = Instruction{
+ .id = Instruction.Id.Phi,
+ .is_generated = false,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal.Unknown,
+ },
+ .incoming_blocks = incoming_blocks,
+ .incoming_values = incoming_values,
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn dump(inst: *const Phi) void {}
+ };
+
+ pub const Br = struct {
+ base: Instruction,
+ dest_block: *BasicBlock,
+ is_comptime: *Instruction,
+
+ pub fn build(
+ irb: *Builder,
+ scope: *Scope,
+ dest_block: *BasicBlock,
+ is_comptime: *Instruction,
+ ) !*Instruction {
+ const inst = try irb.arena().create(Br{
+ .base = Instruction{
+ .id = Instruction.Id.Br,
+ .is_generated = false,
+ .scope = scope,
+ .debug_id = irb.next_debug_id,
+ .val = IrVal{ .Known = &Value.NoReturn.get(irb.module).base },
+ },
+ .dest_block = dest_block,
+ .is_comptime = is_comptime,
+ });
+ irb.next_debug_id += 1;
+ try irb.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ pub fn dump(inst: *const Br) void {}
};
};
+
+pub const Variable = struct {
+ child_scope: *Scope,
+};
+
+pub const BasicBlock = struct {
+ ref_count: usize,
+ name_hint: []const u8,
+ debug_id: usize,
+ scope: *Scope,
+ instruction_list: std.ArrayList(*Instruction),
+
+ pub fn ref(self: *BasicBlock) void {
+ self.ref_count += 1;
+ }
+};
+
+/// Stuff that survives longer than Builder
+pub const Code = struct {
+ basic_block_list: std.ArrayList(*BasicBlock),
+ arena: std.heap.ArenaAllocator,
+
+ /// allocator is module.a()
+ pub fn destroy(self: *Code, allocator: *Allocator) void {
+ self.arena.deinit();
+ allocator.destroy(self);
+ }
+
+ pub fn dump(self: *Code) void {
+ var bb_i: usize = 0;
+ for (self.basic_block_list.toSliceConst()) |bb| {
+ std.debug.warn("{}_{}:\n", bb.name_hint, bb.debug_id);
+ for (bb.instruction_list.toSliceConst()) |instr| {
+ std.debug.warn(" ");
+ instr.dump();
+ std.debug.warn("\n");
+ }
+ }
+ }
+};
+
+pub const Builder = struct {
+ module: *Module,
+ code: *Code,
+ current_basic_block: *BasicBlock,
+ next_debug_id: usize,
+ parsed_file: *ParsedFile,
+ is_comptime: bool,
+
+ pub const Error = error{
+ OutOfMemory,
+ Unimplemented,
+ };
+
+ pub fn init(module: *Module, parsed_file: *ParsedFile) !Builder {
+ const code = try module.a().create(Code{
+ .basic_block_list = undefined,
+ .arena = std.heap.ArenaAllocator.init(module.a()),
+ });
+ code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
+ errdefer code.destroy(module.a());
+
+ return Builder{
+ .module = module,
+ .parsed_file = parsed_file,
+ .current_basic_block = undefined,
+ .code = code,
+ .next_debug_id = 0,
+ .is_comptime = false,
+ };
+ }
+
+ pub fn abort(self: *Builder) void {
+ self.code.destroy(self.module.a());
+ }
+
+ /// Call code.destroy() when done
+ pub fn finish(self: *Builder) *Code {
+ return self.code;
+ }
+
+ /// No need to clean up resources thanks to the arena allocator.
+ pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: []const u8) !*BasicBlock {
+ const basic_block = try self.arena().create(BasicBlock{
+ .ref_count = 0,
+ .name_hint = name_hint,
+ .debug_id = self.next_debug_id,
+ .scope = scope,
+ .instruction_list = std.ArrayList(*Instruction).init(self.arena()),
+ });
+ self.next_debug_id += 1;
+ return basic_block;
+ }
+
+ pub fn setCursorAtEndAndAppendBlock(self: *Builder, basic_block: *BasicBlock) !void {
+ try self.code.basic_block_list.append(basic_block);
+ self.setCursorAtEnd(basic_block);
+ }
+
+ pub fn setCursorAtEnd(self: *Builder, basic_block: *BasicBlock) void {
+ self.current_basic_block = basic_block;
+ }
+
+ pub fn genNode(irb: *Builder, node: *ast.Node, scope: *Scope, lval: LVal) Error!*Instruction {
+ switch (node.id) {
+ ast.Node.Id.Root => unreachable,
+ ast.Node.Id.Use => unreachable,
+ ast.Node.Id.TestDecl => unreachable,
+ ast.Node.Id.VarDecl => @panic("TODO"),
+ ast.Node.Id.Defer => @panic("TODO"),
+ ast.Node.Id.InfixOp => @panic("TODO"),
+ ast.Node.Id.PrefixOp => @panic("TODO"),
+ ast.Node.Id.SuffixOp => @panic("TODO"),
+ ast.Node.Id.Switch => @panic("TODO"),
+ ast.Node.Id.While => @panic("TODO"),
+ ast.Node.Id.For => @panic("TODO"),
+ ast.Node.Id.If => @panic("TODO"),
+ ast.Node.Id.ControlFlowExpression => return error.Unimplemented,
+ ast.Node.Id.Suspend => @panic("TODO"),
+ ast.Node.Id.VarType => @panic("TODO"),
+ ast.Node.Id.ErrorType => @panic("TODO"),
+ ast.Node.Id.FnProto => @panic("TODO"),
+ ast.Node.Id.PromiseType => @panic("TODO"),
+ ast.Node.Id.IntegerLiteral => @panic("TODO"),
+ ast.Node.Id.FloatLiteral => @panic("TODO"),
+ ast.Node.Id.StringLiteral => @panic("TODO"),
+ ast.Node.Id.MultilineStringLiteral => @panic("TODO"),
+ ast.Node.Id.CharLiteral => @panic("TODO"),
+ ast.Node.Id.BoolLiteral => @panic("TODO"),
+ ast.Node.Id.NullLiteral => @panic("TODO"),
+ ast.Node.Id.UndefinedLiteral => @panic("TODO"),
+ ast.Node.Id.ThisLiteral => @panic("TODO"),
+ ast.Node.Id.Unreachable => @panic("TODO"),
+ ast.Node.Id.Identifier => @panic("TODO"),
+ ast.Node.Id.GroupedExpression => {
+ const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", node);
+ return irb.genNode(grouped_expr.expr, scope, lval);
+ },
+ ast.Node.Id.BuiltinCall => @panic("TODO"),
+ ast.Node.Id.ErrorSetDecl => @panic("TODO"),
+ ast.Node.Id.ContainerDecl => @panic("TODO"),
+ ast.Node.Id.Asm => @panic("TODO"),
+ ast.Node.Id.Comptime => @panic("TODO"),
+ ast.Node.Id.Block => {
+ const block = @fieldParentPtr(ast.Node.Block, "base", node);
+ return irb.lvalWrap(scope, try irb.genBlock(block, scope), lval);
+ },
+ ast.Node.Id.DocComment => @panic("TODO"),
+ ast.Node.Id.SwitchCase => @panic("TODO"),
+ ast.Node.Id.SwitchElse => @panic("TODO"),
+ ast.Node.Id.Else => @panic("TODO"),
+ ast.Node.Id.Payload => @panic("TODO"),
+ ast.Node.Id.PointerPayload => @panic("TODO"),
+ ast.Node.Id.PointerIndexPayload => @panic("TODO"),
+ ast.Node.Id.StructField => @panic("TODO"),
+ ast.Node.Id.UnionTag => @panic("TODO"),
+ ast.Node.Id.EnumTag => @panic("TODO"),
+ ast.Node.Id.ErrorTag => @panic("TODO"),
+ ast.Node.Id.AsmInput => @panic("TODO"),
+ ast.Node.Id.AsmOutput => @panic("TODO"),
+ ast.Node.Id.AsyncAttribute => @panic("TODO"),
+ ast.Node.Id.ParamDecl => @panic("TODO"),
+ ast.Node.Id.FieldInitializer => @panic("TODO"),
+ }
+ }
+
+ fn isCompTime(irb: *Builder, target_scope: *Scope) bool {
+ if (irb.is_comptime)
+ return true;
+
+ var scope = target_scope;
+ while (true) {
+ switch (scope.id) {
+ Scope.Id.CompTime => return true,
+ Scope.Id.FnDef => return false,
+ Scope.Id.Decls => unreachable,
+ Scope.Id.Block,
+ Scope.Id.Defer,
+ Scope.Id.DeferExpr,
+ => scope = scope.parent orelse return false,
+ }
+ }
+ }
+
+ pub fn genBlock(irb: *Builder, block: *ast.Node.Block, parent_scope: *Scope) !*Instruction {
+ const block_scope = try Scope.Block.create(irb.module, parent_scope);
+
+ const outer_block_scope = &block_scope.base;
+ var child_scope = outer_block_scope;
+
+ if (parent_scope.findFnDef()) |fndef_scope| {
+ if (fndef_scope.fn_val.child_scope == parent_scope) {
+ fndef_scope.fn_val.block_scope = block_scope;
+ }
+ }
+
+ if (block.statements.len == 0) {
+ // {}
+ return Instruction.Const.buildVoid(irb, child_scope, false);
+ }
+
+ if (block.label) |label| {
+ block_scope.incoming_values = std.ArrayList(*Instruction).init(irb.arena());
+ block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena());
+ block_scope.end_block = try irb.createBasicBlock(parent_scope, "BlockEnd");
+ block_scope.is_comptime = try Instruction.Const.buildBool(irb, parent_scope, irb.isCompTime(parent_scope));
+ }
+
+ var is_continuation_unreachable = false;
+ var noreturn_return_value: ?*Instruction = null;
+
+ var stmt_it = block.statements.iterator(0);
+ while (stmt_it.next()) |statement_node_ptr| {
+ const statement_node = statement_node_ptr.*;
+
+ if (statement_node.cast(ast.Node.Defer)) |defer_node| {
+ // defer starts a new scope
+ const defer_token = irb.parsed_file.tree.tokens.at(defer_node.defer_token);
+ const kind = switch (defer_token.id) {
+ Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
+ Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
+ else => unreachable,
+ };
+ const defer_expr_scope = try Scope.DeferExpr.create(irb.module, parent_scope, defer_node.expr);
+ const defer_child_scope = try Scope.Defer.create(irb.module, parent_scope, kind, defer_expr_scope);
+ child_scope = &defer_child_scope.base;
+ continue;
+ }
+ const statement_value = try irb.genNode(statement_node, child_scope, LVal.None);
+
+ is_continuation_unreachable = statement_value.isNoReturn();
+ if (is_continuation_unreachable) {
+ // keep the last noreturn statement value around in case we need to return it
+ noreturn_return_value = statement_value;
+ }
+
+ if (statement_value.cast(Instruction.DeclVar)) |decl_var| {
+ // variable declarations start a new scope
+ child_scope = decl_var.variable.child_scope;
+ } else if (!is_continuation_unreachable) {
+ // this statement's value must be void
+ _ = Instruction.CheckVoidStmt.build(irb, child_scope, statement_value);
+ }
+ }
+
+ if (is_continuation_unreachable) {
+ assert(noreturn_return_value != null);
+ if (block.label == null or block_scope.incoming_blocks.len == 0) {
+ return noreturn_return_value.?;
+ }
+
+ try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
+ return Instruction.Phi.build(
+ irb,
+ parent_scope,
+ block_scope.incoming_blocks.toOwnedSlice(),
+ block_scope.incoming_values.toOwnedSlice(),
+ );
+ }
+
+ if (block.label) |label| {
+ try block_scope.incoming_blocks.append(irb.current_basic_block);
+ try block_scope.incoming_values.append(
+ try Instruction.Const.buildVoid(irb, parent_scope, true),
+ );
+ _ = try irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit);
+ (try Instruction.Br.build(
+ irb,
+ parent_scope,
+ block_scope.end_block,
+ block_scope.is_comptime,
+ )).setGenerated();
+ try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
+ return Instruction.Phi.build(
+ irb,
+ parent_scope,
+ block_scope.incoming_blocks.toOwnedSlice(),
+ block_scope.incoming_values.toOwnedSlice(),
+ );
+ }
+
+ _ = try irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit);
+ const result = try Instruction.Const.buildVoid(irb, child_scope, false);
+ result.setGenerated();
+ return result;
+ }
+
+ fn genDefersForBlock(
+ irb: *Builder,
+ inner_scope: *Scope,
+ outer_scope: *Scope,
+ gen_kind: Scope.Defer.Kind,
+ ) !bool {
+ var scope = inner_scope;
+ var is_noreturn = false;
+ while (true) {
+ switch (scope.id) {
+ Scope.Id.Defer => {
+ const defer_scope = @fieldParentPtr(Scope.Defer, "base", scope);
+ const generate = switch (defer_scope.kind) {
+ Scope.Defer.Kind.ScopeExit => true,
+ Scope.Defer.Kind.ErrorExit => gen_kind == Scope.Defer.Kind.ErrorExit,
+ };
+ if (generate) {
+ const defer_expr_scope = defer_scope.defer_expr_scope;
+ const instruction = try irb.genNode(
+ defer_expr_scope.expr_node,
+ &defer_expr_scope.base,
+ LVal.None,
+ );
+ if (instruction.isNoReturn()) {
+ is_noreturn = true;
+ } else {
+ _ = Instruction.CheckVoidStmt.build(irb, &defer_expr_scope.base, instruction);
+ }
+ }
+ },
+ Scope.Id.FnDef,
+ Scope.Id.Decls,
+ => return is_noreturn,
+
+ Scope.Id.CompTime,
+ Scope.Id.Block,
+ => scope = scope.parent orelse return is_noreturn,
+
+ Scope.Id.DeferExpr => unreachable,
+ }
+ }
+ }
+
+ pub fn lvalWrap(irb: *Builder, scope: *Scope, instruction: *Instruction, lval: LVal) !*Instruction {
+ switch (lval) {
+ LVal.None => return instruction,
+ LVal.Ptr => {
+ // We needed a pointer to a value, but we got a value. So we create
+ // an instruction which just makes a const pointer of it.
+ return Instruction.Ref.build(irb, scope, instruction, Mut.Const, Volatility.NonVolatile);
+ },
+ }
+ }
+
+ fn arena(self: *Builder) *Allocator {
+ return &self.code.arena.allocator;
+ }
+};
+
+pub async fn gen(module: *Module, body_node: *ast.Node, scope: *Scope, parsed_file: *ParsedFile) !*Code {
+ var irb = try Builder.init(module, parsed_file);
+ errdefer irb.abort();
+
+ const entry_block = try irb.createBasicBlock(scope, "Entry");
+ entry_block.ref(); // Entry block gets a reference because we enter it to begin.
+ try irb.setCursorAtEndAndAppendBlock(entry_block);
+
+ const result = try irb.genNode(body_node, scope, LVal.None);
+ if (!result.isNoReturn()) {
+ const void_inst = try Instruction.Const.buildVoid(&irb, scope, false);
+ (try Instruction.Return.build(&irb, scope, void_inst)).setGenerated();
+ }
+
+ return irb.finish();
+}
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 5cde12f65c..e74c84e02c 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -15,12 +15,21 @@ const errmsg = @import("errmsg.zig");
const ast = std.zig.ast;
const event = std.event;
const assert = std.debug.assert;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Scope = @import("scope.zig").Scope;
+const Decl = @import("decl.zig").Decl;
+const ir = @import("ir.zig");
+const Visib = @import("visib.zig").Visib;
+const ParsedFile = @import("parsed_file.zig").ParsedFile;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
pub const Module = struct {
loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
- module: llvm.ModuleRef,
+ llvm_module: llvm.ModuleRef,
context: llvm.ContextRef,
builder: llvm.BuilderRef,
target: Target,
@@ -91,6 +100,16 @@ pub const Module = struct {
compile_errors: event.Locked(CompileErrList),
+ meta_type: *Type.MetaType,
+ void_type: *Type.Void,
+ bool_type: *Type.Bool,
+ noreturn_type: *Type.NoReturn,
+
+ void_value: *Value.Void,
+ true_value: *Value.Bool,
+ false_value: *Value.Bool,
+ noreturn_value: *Value.NoReturn,
+
const CompileErrList = std.ArrayList(*errmsg.Msg);
// TODO handle some of these earlier and report them in a way other than error codes
@@ -129,6 +148,7 @@ pub const Module = struct {
Overflow,
NotSupported,
BufferTooSmall,
+ Unimplemented,
};
pub const Event = union(enum) {
@@ -180,8 +200,8 @@ pub const Module = struct {
const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
errdefer c.LLVMContextDispose(context);
- const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
- errdefer c.LLVMDisposeModule(module);
+ const llvm_module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
+ errdefer c.LLVMDisposeModule(llvm_module);
const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
@@ -189,12 +209,12 @@ pub const Module = struct {
const events = try event.Channel(Event).create(loop, 0);
errdefer events.destroy();
- return loop.allocator.create(Module{
+ const module = try loop.allocator.create(Module{
.loop = loop,
.events = events,
.name = name_buffer,
.root_src_path = root_src_path,
- .module = module,
+ .llvm_module = llvm_module,
.context = context,
.builder = builder,
.target = target.*,
@@ -248,7 +268,109 @@ pub const Module = struct {
.exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
.build_group = event.Group(BuildError!void).init(loop),
.compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
+
+ .meta_type = undefined,
+ .void_type = undefined,
+ .void_value = undefined,
+ .bool_type = undefined,
+ .true_value = undefined,
+ .false_value = undefined,
+ .noreturn_type = undefined,
+ .noreturn_value = undefined,
+ });
+ try module.initTypes();
+ return module;
+ }
+
+ fn initTypes(module: *Module) !void {
+ module.meta_type = try module.a().create(Type.MetaType{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = undefined,
+ .ref_count = 3, // 3 because it references itself twice
+ },
+ .id = builtin.TypeId.Type,
+ },
+ .value = undefined,
+ });
+ module.meta_type.value = &module.meta_type.base;
+ module.meta_type.base.base.typeof = &module.meta_type.base;
+ errdefer module.a().destroy(module.meta_type);
+
+ module.void_type = try module.a().create(Type.Void{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &Type.MetaType.get(module).base,
+ .ref_count = 1,
+ },
+ .id = builtin.TypeId.Void,
+ },
+ });
+ errdefer module.a().destroy(module.void_type);
+
+ module.noreturn_type = try module.a().create(Type.NoReturn{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &Type.MetaType.get(module).base,
+ .ref_count = 1,
+ },
+ .id = builtin.TypeId.NoReturn,
+ },
+ });
+ errdefer module.a().destroy(module.noreturn_type);
+
+ module.bool_type = try module.a().create(Type.Bool{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &Type.MetaType.get(module).base,
+ .ref_count = 1,
+ },
+ .id = builtin.TypeId.Bool,
+ },
+ });
+ errdefer module.a().destroy(module.bool_type);
+
+ module.void_value = try module.a().create(Value.Void{
+ .base = Value{
+ .id = Value.Id.Void,
+ .typeof = &Type.Void.get(module).base,
+ .ref_count = 1,
+ },
+ });
+ errdefer module.a().destroy(module.void_value);
+
+ module.true_value = try module.a().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typeof = &Type.Bool.get(module).base,
+ .ref_count = 1,
+ },
+ .x = true,
+ });
+ errdefer module.a().destroy(module.true_value);
+
+ module.false_value = try module.a().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typeof = &Type.Bool.get(module).base,
+ .ref_count = 1,
+ },
+ .x = false,
});
+ errdefer module.a().destroy(module.false_value);
+
+ module.noreturn_value = try module.a().create(Value.NoReturn{
+ .base = Value{
+ .id = Value.Id.NoReturn,
+ .typeof = &Type.NoReturn.get(module).base,
+ .ref_count = 1,
+ },
+ });
+ errdefer module.a().destroy(module.noreturn_value);
}
fn dump(self: *Module) void {
@@ -256,9 +378,17 @@ pub const Module = struct {
}
pub fn destroy(self: *Module) void {
+ self.noreturn_value.base.deref(self);
+ self.void_value.base.deref(self);
+ self.false_value.base.deref(self);
+ self.true_value.base.deref(self);
+ self.noreturn_type.base.base.deref(self);
+ self.void_type.base.base.deref(self);
+ self.meta_type.base.base.deref(self);
+
self.events.destroy();
c.LLVMDisposeBuilder(self.builder);
- c.LLVMDisposeModule(self.module);
+ c.LLVMDisposeModule(self.llvm_module);
c.LLVMContextDispose(self.context);
self.name.deinit();
@@ -331,8 +461,8 @@ pub const Module = struct {
const tree = &parsed_file.tree;
// create empty struct for it
- const decls = try Scope.Decls.create(self.a(), null);
- errdefer decls.destroy();
+ const decls = try Scope.Decls.create(self, null);
+ defer decls.base.deref(self);
var decl_group = event.Group(BuildError!void).init(self.loop);
errdefer decl_group.cancelAll();
@@ -359,14 +489,17 @@ pub const Module = struct {
.id = Decl.Id.Fn,
.name = name,
.visib = parseVisibToken(tree, fn_proto.visib_token),
- .resolution = Decl.Resolution.Unresolved,
+ .resolution = event.Future(BuildError!void).init(self.loop),
+ .resolution_in_progress = 0,
+ .parsed_file = parsed_file,
+ .parent_scope = &decls.base,
},
.value = Decl.Fn.Val{ .Unresolved = {} },
.fn_proto = fn_proto,
});
errdefer self.a().destroy(fn_decl);
- try decl_group.call(addTopLevelDecl, self, parsed_file, &fn_decl.base);
+ try decl_group.call(addTopLevelDecl, self, &fn_decl.base);
},
ast.Node.Id.TestDecl => @panic("TODO"),
else => unreachable,
@@ -376,12 +509,12 @@ pub const Module = struct {
try await (async self.build_group.wait() catch unreachable);
}
- async fn addTopLevelDecl(self: *Module, parsed_file: *ParsedFile, decl: *Decl) !void {
- const is_export = decl.isExported(&parsed_file.tree);
+ async fn addTopLevelDecl(self: *Module, decl: *Decl) !void {
+ const is_export = decl.isExported(&decl.parsed_file.tree);
if (is_export) {
- try self.build_group.call(verifyUniqueSymbol, self, parsed_file, decl);
- try self.build_group.call(generateDecl, self, parsed_file, decl);
+ try self.build_group.call(verifyUniqueSymbol, self, decl);
+ try self.build_group.call(resolveDecl, self, decl);
}
}
@@ -416,36 +549,21 @@ pub const Module = struct {
try compile_errors.value.append(msg);
}
- async fn verifyUniqueSymbol(self: *Module, parsed_file: *ParsedFile, decl: *Decl) !void {
+ async fn verifyUniqueSymbol(self: *Module, decl: *Decl) !void {
const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
defer exported_symbol_names.release();
if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
try self.addCompileError(
- parsed_file,
+ decl.parsed_file,
decl.getSpan(),
"exported symbol collision: '{}'",
decl.name,
);
+ // TODO add error note showing location of other symbol
}
}
- /// This declaration has been blessed as going into the final code generation.
- async fn generateDecl(self: *Module, parsed_file: *ParsedFile, decl: *Decl) void {
- switch (decl.id) {
- Decl.Id.Var => @panic("TODO"),
- Decl.Id.Fn => {
- const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
- return await (async self.generateDeclFn(parsed_file, fn_decl) catch unreachable);
- },
- Decl.Id.CompTime => @panic("TODO"),
- }
- }
-
- async fn generateDeclFn(self: *Module, parsed_file: *ParsedFile, fn_decl: *Decl.Fn) void {
- fn_decl.value = Decl.Fn.Val{ .Ok = Value.Fn{} };
- }
-
pub fn link(self: *Module, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
@@ -501,177 +619,48 @@ fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib
}
}
-pub const Scope = struct {
- id: Id,
- parent: ?*Scope,
-
- pub const Id = enum {
- Decls,
- Block,
- };
-
- pub const Decls = struct {
- base: Scope,
- table: Decl.Table,
-
- pub fn create(a: *Allocator, parent: ?*Scope) !*Decls {
- const self = try a.create(Decls{
- .base = Scope{
- .id = Id.Decls,
- .parent = parent,
- },
- .table = undefined,
- });
- errdefer a.destroy(self);
-
- self.table = Decl.Table.init(a);
- errdefer self.table.deinit();
-
- return self;
- }
-
- pub fn destroy(self: *Decls) void {
- self.table.deinit();
- self.table.allocator.destroy(self);
- self.* = undefined;
- }
- };
-
- pub const Block = struct {
- base: Scope,
- };
-};
-
-pub const Visib = enum {
- Private,
- Pub,
-};
-
-pub const Decl = struct {
- id: Id,
- name: []const u8,
- visib: Visib,
- resolution: Resolution,
-
- pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
-
- pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
- switch (base.id) {
- Id.Fn => {
- const fn_decl = @fieldParentPtr(Fn, "base", base);
- return fn_decl.isExported(tree);
- },
- else => return false,
- }
+/// This declaration has been blessed as going into the final code generation.
+pub async fn resolveDecl(module: *Module, decl: *Decl) !void {
+ if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
+ decl.resolution.data = await (async generateDecl(module, decl) catch unreachable);
+ decl.resolution.resolve();
+ } else {
+ return (await (async decl.resolution.get() catch unreachable)).*;
}
+}
- pub fn getSpan(base: *const Decl) errmsg.Span {
- switch (base.id) {
- Id.Fn => {
- const fn_decl = @fieldParentPtr(Fn, "base", base);
- const fn_proto = fn_decl.fn_proto;
- const start = fn_proto.fn_token;
- const end = fn_proto.name_token orelse start;
- return errmsg.Span{
- .first = start,
- .last = end + 1,
- };
- },
- else => @panic("TODO"),
- }
+/// The function that actually does the generation.
+async fn generateDecl(module: *Module, decl: *Decl) !void {
+ switch (decl.id) {
+ Decl.Id.Var => @panic("TODO"),
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
+ return await (async generateDeclFn(module, fn_decl) catch unreachable);
+ },
+ Decl.Id.CompTime => @panic("TODO"),
}
+}
- pub const Resolution = enum {
- Unresolved,
- InProgress,
- Invalid,
- Ok,
- };
-
- pub const Id = enum {
- Var,
- Fn,
- CompTime,
- };
-
- pub const Var = struct {
- base: Decl,
- };
+async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void {
+ const body_node = fn_decl.fn_proto.body_node orelse @panic("TODO extern fn proto decl");
- pub const Fn = struct {
- base: Decl,
- value: Val,
- fn_proto: *const ast.Node.FnProto,
+ const fndef_scope = try Scope.FnDef.create(module, fn_decl.base.parent_scope);
+ defer fndef_scope.base.deref(module);
- // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
- pub const Val = union {
- Unresolved: void,
- Ok: Value.Fn,
- };
+ const fn_type = try Type.Fn.create(module);
+ defer fn_type.base.base.deref(module);
- pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
- return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
- const token = tree.tokens.at(tok_index);
- break :x switch (token.id) {
- Token.Id.Extern => tree.tokenSlicePtr(token),
- else => null,
- };
- } else null;
- }
+ const fn_val = try Value.Fn.create(module, fn_type, fndef_scope);
+ defer fn_val.base.deref(module);
- pub fn isExported(self: Fn, tree: *ast.Tree) bool {
- if (self.fn_proto.extern_export_inline_token) |tok_index| {
- const token = tree.tokens.at(tok_index);
- return token.id == Token.Id.Keyword_export;
- } else {
- return false;
- }
- }
- };
+ fn_decl.value = Decl.Fn.Val{ .Ok = fn_val };
- pub const CompTime = struct {
- base: Decl,
- };
-};
-
-pub const Value = struct {
- pub const Fn = struct {};
-};
-
-pub const Type = struct {
- id: Id,
-
- pub const Id = enum {
- Type,
- Void,
- Bool,
- NoReturn,
- Int,
- Float,
- Pointer,
- Array,
- Struct,
- ComptimeFloat,
- ComptimeInt,
- Undefined,
- Null,
- Optional,
- ErrorUnion,
- ErrorSet,
- Enum,
- Union,
- Fn,
- Opaque,
- Promise,
- };
-
- pub const Struct = struct {
- base: Type,
- decls: *Scope.Decls,
- };
-};
-
-pub const ParsedFile = struct {
- tree: ast.Tree,
- realpath: []const u8,
-};
+ const code = try await (async ir.gen(
+ module,
+ body_node,
+ &fndef_scope.base,
+ fn_decl.base.parsed_file,
+ ) catch unreachable);
+ //code.dump();
+ //try await (async irAnalyze(module, func) catch unreachable);
+}
diff --git a/src-self-hosted/parsed_file.zig b/src-self-hosted/parsed_file.zig
new file mode 100644
index 0000000000..d728c2fd18
--- /dev/null
+++ b/src-self-hosted/parsed_file.zig
@@ -0,0 +1,6 @@
+const ast = @import("std").zig.ast;
+
+pub const ParsedFile = struct {
+ tree: ast.Tree,
+ realpath: []const u8,
+};
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index b73dcb4ed3..8f8d016a7c 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -1,16 +1,234 @@
+const std = @import("std");
+const Allocator = mem.Allocator;
+const Decl = @import("decl.zig").Decl;
+const Module = @import("module.zig").Module;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Value = @import("value.zig").Value;
+const ir = @import("ir.zig");
+
pub const Scope = struct {
id: Id,
- parent: *Scope,
+ parent: ?*Scope,
+ ref_count: usize,
+
+ pub fn ref(base: *Scope) void {
+ base.ref_count += 1;
+ }
+
+ pub fn deref(base: *Scope, module: *Module) void {
+ base.ref_count -= 1;
+ if (base.ref_count == 0) {
+ if (base.parent) |parent| parent.deref(module);
+ switch (base.id) {
+ Id.Decls => @fieldParentPtr(Decls, "base", base).destroy(),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(module),
+ Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(module),
+ Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(module),
+ Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(module),
+ Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(module),
+ }
+ }
+ }
+
+ pub fn findFnDef(base: *Scope) ?*FnDef {
+ var scope = base;
+ while (true) {
+ switch (scope.id) {
+ Id.FnDef => return @fieldParentPtr(FnDef, "base", base),
+ Id.Decls => return null,
+
+ Id.Block,
+ Id.Defer,
+ Id.DeferExpr,
+ Id.CompTime,
+ => scope = scope.parent orelse return null,
+ }
+ }
+ }
pub const Id = enum {
Decls,
Block,
- Defer,
- DeferExpr,
- VarDecl,
- CImport,
- Loop,
FnDef,
CompTime,
+ Defer,
+ DeferExpr,
+ };
+
+ pub const Decls = struct {
+ base: Scope,
+ table: Decl.Table,
+
+ /// Creates a Decls scope with 1 reference
+ pub fn create(module: *Module, parent: ?*Scope) !*Decls {
+ const self = try module.a().create(Decls{
+ .base = Scope{
+ .id = Id.Decls,
+ .parent = parent,
+ .ref_count = 1,
+ },
+ .table = undefined,
+ });
+ errdefer module.a().destroy(self);
+
+ self.table = Decl.Table.init(module.a());
+ errdefer self.table.deinit();
+
+ if (parent) |p| p.ref();
+
+ return self;
+ }
+
+ pub fn destroy(self: *Decls) void {
+ self.table.deinit();
+ self.table.allocator.destroy(self);
+ }
+ };
+
+ pub const Block = struct {
+ base: Scope,
+ incoming_values: std.ArrayList(*ir.Instruction),
+ incoming_blocks: std.ArrayList(*ir.BasicBlock),
+ end_block: *ir.BasicBlock,
+ is_comptime: *ir.Instruction,
+
+ /// Creates a Block scope with 1 reference
+ pub fn create(module: *Module, parent: ?*Scope) !*Block {
+ const self = try module.a().create(Block{
+ .base = Scope{
+ .id = Id.Block,
+ .parent = parent,
+ .ref_count = 1,
+ },
+ .incoming_values = undefined,
+ .incoming_blocks = undefined,
+ .end_block = undefined,
+ .is_comptime = undefined,
+ });
+ errdefer module.a().destroy(self);
+
+ if (parent) |p| p.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Block, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const FnDef = struct {
+ base: Scope,
+
+ /// This reference is not counted so that the scope can get destroyed with the function
+ fn_val: *Value.Fn,
+
+ /// Creates a FnDef scope with 1 reference
+ /// Must set the fn_val later
+ pub fn create(module: *Module, parent: ?*Scope) !*FnDef {
+ const self = try module.a().create(FnDef{
+ .base = Scope{
+ .id = Id.FnDef,
+ .parent = parent,
+ .ref_count = 1,
+ },
+ .fn_val = undefined,
+ });
+
+ if (parent) |p| p.ref();
+
+ return self;
+ }
+
+ pub fn destroy(self: *FnDef, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Scope,
+
+ /// Creates a CompTime scope with 1 reference
+ pub fn create(module: *Module, parent: ?*Scope) !*CompTime {
+ const self = try module.a().create(CompTime{
+ .base = Scope{
+ .id = Id.CompTime,
+ .parent = parent,
+ .ref_count = 1,
+ },
+ });
+
+ if (parent) |p| p.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *CompTime, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Defer = struct {
+ base: Scope,
+ defer_expr_scope: *DeferExpr,
+ kind: Kind,
+
+ pub const Kind = enum {
+ ScopeExit,
+ ErrorExit,
+ };
+
+ /// Creates a Defer scope with 1 reference
+ pub fn create(
+ module: *Module,
+ parent: ?*Scope,
+ kind: Kind,
+ defer_expr_scope: *DeferExpr,
+ ) !*Defer {
+ const self = try module.a().create(Defer{
+ .base = Scope{
+ .id = Id.Defer,
+ .parent = parent,
+ .ref_count = 1,
+ },
+ .defer_expr_scope = defer_expr_scope,
+ .kind = kind,
+ });
+ errdefer module.a().destroy(self);
+
+ defer_expr_scope.base.ref();
+
+ if (parent) |p| p.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Defer, module: *Module) void {
+ self.defer_expr_scope.base.deref(module);
+ module.a().destroy(self);
+ }
+ };
+
+ pub const DeferExpr = struct {
+ base: Scope,
+ expr_node: *ast.Node,
+
+ /// Creates a DeferExpr scope with 1 reference
+ pub fn create(module: *Module, parent: ?*Scope, expr_node: *ast.Node) !*DeferExpr {
+ const self = try module.a().create(DeferExpr{
+ .base = Scope{
+ .id = Id.DeferExpr,
+ .parent = parent,
+ .ref_count = 1,
+ },
+ .expr_node = expr_node,
+ });
+ errdefer module.a().destroy(self);
+
+ if (parent) |p| p.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *DeferExpr, module: *Module) void {
+ module.a().destroy(self);
+ }
};
};
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
new file mode 100644
index 0000000000..4b3918854d
--- /dev/null
+++ b/src-self-hosted/type.zig
@@ -0,0 +1,268 @@
+const builtin = @import("builtin");
+const Scope = @import("scope.zig").Scope;
+const Module = @import("module.zig").Module;
+const Value = @import("value.zig").Value;
+
+pub const Type = struct {
+ base: Value,
+ id: Id,
+
+ pub const Id = builtin.TypeId;
+
+ pub fn destroy(base: *Type, module: *Module) void {
+ switch (base.id) {
+ Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(module),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(module),
+ Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(module),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(module),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(module),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module),
+ Id.Int => @fieldParentPtr(Int, "base", base).destroy(module),
+ Id.Float => @fieldParentPtr(Float, "base", base).destroy(module),
+ Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(module),
+ Id.Array => @fieldParentPtr(Array, "base", base).destroy(module),
+ Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(module),
+ Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(module),
+ Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(module),
+ Id.Null => @fieldParentPtr(Null, "base", base).destroy(module),
+ Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(module),
+ Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(module),
+ Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(module),
+ Id.Enum => @fieldParentPtr(Enum, "base", base).destroy(module),
+ Id.Union => @fieldParentPtr(Union, "base", base).destroy(module),
+ Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(module),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(module),
+ Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(module),
+ Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(module),
+ Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(module),
+ Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(module),
+ }
+ }
+
+ pub const Struct = struct {
+ base: Type,
+ decls: *Scope.Decls,
+
+ pub fn destroy(self: *Struct, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Fn = struct {
+ base: Type,
+
+ pub fn create(module: *Module) !*Fn {
+ return module.a().create(Fn{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &MetaType.get(module).base,
+ .ref_count = 1,
+ },
+ .id = builtin.TypeId.Fn,
+ },
+ });
+ }
+
+ pub fn destroy(self: *Fn, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const MetaType = struct {
+ base: Type,
+ value: *Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(module: *Module) *MetaType {
+ module.meta_type.base.base.ref();
+ return module.meta_type;
+ }
+
+ pub fn destroy(self: *MetaType, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Void = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(module: *Module) *Void {
+ module.void_type.base.base.ref();
+ return module.void_type;
+ }
+
+ pub fn destroy(self: *Void, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Bool = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(module: *Module) *Bool {
+ module.bool_type.base.base.ref();
+ return module.bool_type;
+ }
+
+ pub fn destroy(self: *Bool, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const NoReturn = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(module: *Module) *NoReturn {
+ module.noreturn_type.base.base.ref();
+ return module.noreturn_type;
+ }
+
+ pub fn destroy(self: *NoReturn, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Int = struct {
+ base: Type,
+
+ pub fn destroy(self: *Int, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Float = struct {
+ base: Type,
+
+ pub fn destroy(self: *Float, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Pointer = struct {
+ base: Type,
+
+ pub fn destroy(self: *Pointer, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Array = struct {
+ base: Type,
+
+ pub fn destroy(self: *Array, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const ComptimeFloat = struct {
+ base: Type,
+
+ pub fn destroy(self: *ComptimeFloat, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const ComptimeInt = struct {
+ base: Type,
+
+ pub fn destroy(self: *ComptimeInt, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Undefined = struct {
+ base: Type,
+
+ pub fn destroy(self: *Undefined, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Null = struct {
+ base: Type,
+
+ pub fn destroy(self: *Null, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Optional = struct {
+ base: Type,
+
+ pub fn destroy(self: *Optional, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const ErrorUnion = struct {
+ base: Type,
+
+ pub fn destroy(self: *ErrorUnion, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const ErrorSet = struct {
+ base: Type,
+
+ pub fn destroy(self: *ErrorSet, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Enum = struct {
+ base: Type,
+
+ pub fn destroy(self: *Enum, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Union = struct {
+ base: Type,
+
+ pub fn destroy(self: *Union, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+ pub const Namespace = struct {
+ base: Type,
+
+ pub fn destroy(self: *Namespace, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Block = struct {
+ base: Type,
+
+ pub fn destroy(self: *Block, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const BoundFn = struct {
+ base: Type,
+
+ pub fn destroy(self: *BoundFn, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const ArgTuple = struct {
+ base: Type,
+
+ pub fn destroy(self: *ArgTuple, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Opaque = struct {
+ base: Type,
+
+ pub fn destroy(self: *Opaque, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Promise = struct {
+ base: Type,
+
+ pub fn destroy(self: *Promise, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+};
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
new file mode 100644
index 0000000000..b53d03d0ad
--- /dev/null
+++ b/src-self-hosted/value.zig
@@ -0,0 +1,125 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Scope = @import("scope.zig").Scope;
+const Module = @import("module.zig").Module;
+
+/// Values are ref-counted, heap-allocated, and copy-on-write
+/// If there is only 1 ref then write need not copy
+pub const Value = struct {
+ id: Id,
+ typeof: *Type,
+ ref_count: usize,
+
+ pub fn ref(base: *Value) void {
+ base.ref_count += 1;
+ }
+
+ pub fn deref(base: *Value, module: *Module) void {
+ base.ref_count -= 1;
+ if (base.ref_count == 0) {
+ base.typeof.base.deref(module);
+ switch (base.id) {
+ Id.Type => @fieldParentPtr(Type, "base", base).destroy(module),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(module),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(module),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(module),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module),
+ }
+ }
+ }
+
+ pub fn dump(base: *const Value) void {
+ std.debug.warn("{}", @tagName(base.id));
+ }
+
+ pub const Id = enum {
+ Type,
+ Fn,
+ Void,
+ Bool,
+ NoReturn,
+ };
+
+ pub const Type = @import("type.zig").Type;
+
+ pub const Fn = struct {
+ base: Value,
+
+ /// parent should be the top level decls or container decls
+ fndef_scope: *Scope.FnDef,
+
+ /// parent is scope for last parameter
+ child_scope: *Scope,
+
+ /// parent is child_scope
+ block_scope: *Scope.Block,
+
+ /// Creates a Fn value with 1 ref
+ pub fn create(module: *Module, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef) !*Fn {
+ const self = try module.a().create(Fn{
+ .base = Value{
+ .id = Value.Id.Fn,
+ .typeof = &fn_type.base,
+ .ref_count = 1,
+ },
+ .fndef_scope = fndef_scope,
+ .child_scope = &fndef_scope.base,
+ .block_scope = undefined,
+ });
+ fn_type.base.base.ref();
+ fndef_scope.fn_val = self;
+ fndef_scope.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Fn, module: *Module) void {
+ self.fndef_scope.base.deref(module);
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Void = struct {
+ base: Value,
+
+ pub fn get(module: *Module) *Void {
+ module.void_value.base.ref();
+ return module.void_value;
+ }
+
+ pub fn destroy(self: *Void, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const Bool = struct {
+ base: Value,
+ x: bool,
+
+ pub fn get(module: *Module, x: bool) *Bool {
+ if (x) {
+ module.true_value.base.ref();
+ return module.true_value;
+ } else {
+ module.false_value.base.ref();
+ return module.false_value;
+ }
+ }
+
+ pub fn destroy(self: *Bool, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+
+ pub const NoReturn = struct {
+ base: Value,
+
+ pub fn get(module: *Module) *NoReturn {
+ module.noreturn_value.base.ref();
+ return module.noreturn_value;
+ }
+
+ pub fn destroy(self: *NoReturn, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
+};
diff --git a/src-self-hosted/visib.zig b/src-self-hosted/visib.zig
new file mode 100644
index 0000000000..3704600cca
--- /dev/null
+++ b/src-self-hosted/visib.zig
@@ -0,0 +1,4 @@
+pub const Visib = enum {
+ Private,
+ Pub,
+};
diff --git a/std/event/future.zig b/std/event/future.zig
index b6ec861f77..23fa570c8f 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -57,7 +57,7 @@ test "std.event.Future" {
const allocator = &da.allocator;
var loop: Loop = undefined;
- try loop.initSingleThreaded(allocator);
+ try loop.initMultiThreaded(allocator);
defer loop.deinit();
const handle = try async testFuture(&loop);
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 63518c5182..004f9278b9 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -970,14 +970,8 @@ pub const Node = struct {
pub const Defer = struct {
base: Node,
defer_token: TokenIndex,
- kind: Kind,
expr: *Node,
- const Kind = enum {
- Error,
- Unconditional,
- };
-
pub fn iterate(self: *Defer, index: usize) ?*Node {
var i = index;
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 9f0371d4da..9842ba2a17 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -1041,11 +1041,6 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const node = try arena.create(ast.Node.Defer{
.base = ast.Node{ .id = ast.Node.Id.Defer },
.defer_token = token_index,
- .kind = switch (token_ptr.id) {
- Token.Id.Keyword_defer => ast.Node.Defer.Kind.Unconditional,
- Token.Id.Keyword_errdefer => ast.Node.Defer.Kind.Error,
- else => unreachable,
- },
.expr = undefined,
});
const node_ptr = try block.statements.addOne();
--
cgit v1.2.3
From e78b1b810fd15dfd135c80d06d621851a59f42c6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 13 Jul 2018 21:56:38 -0400
Subject: self-hosted: basic IR pass2
---
src-self-hosted/errmsg.zig | 7 +
src-self-hosted/ir.zig | 750 ++++++++++++++++++++++++++++++++++-----------
src-self-hosted/main.zig | 2 +-
src-self-hosted/module.zig | 85 +++--
src-self-hosted/type.zig | 33 ++
src-self-hosted/value.zig | 21 ++
std/event/loop.zig | 15 +
7 files changed, 708 insertions(+), 205 deletions(-)
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index a92b5145ce..4e353bfb14 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -14,6 +14,13 @@ pub const Color = enum {
pub const Span = struct {
first: ast.TokenIndex,
last: ast.TokenIndex,
+
+ pub fn token(i: TokenIndex) Span {
+ return Span {
+ .first = i,
+ .last = i,
+ };
+ }
};
pub const Msg = struct {
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 19bb018472..22161a0c27 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -9,31 +9,34 @@ const Type = Value.Type;
const assert = std.debug.assert;
const Token = std.zig.Token;
const ParsedFile = @import("parsed_file.zig").ParsedFile;
+const Span = @import("errmsg.zig").Span;
pub const LVal = enum {
None,
Ptr,
};
-pub const Mut = enum {
- Mut,
- Const,
-};
-
-pub const Volatility = enum {
- NonVolatile,
- Volatile,
-};
-
pub const IrVal = union(enum) {
Unknown,
- Known: *Value,
+ KnownType: *Type,
+ KnownValue: *Value,
+
+ const Init = enum {
+ Unknown,
+ NoReturn,
+ Void,
+ };
pub fn dump(self: IrVal) void {
switch (self) {
- IrVal.Unknown => std.debug.warn("Unknown"),
- IrVal.Known => |value| {
- std.debug.warn("Known(");
+            IrVal.Unknown => std.debug.warn("Unknown"),
+ IrVal.KnownType => |typeof| {
+ std.debug.warn("KnownType(");
+ typeof.dump();
+ std.debug.warn(")");
+ },
+ IrVal.KnownValue => |value| {
+ std.debug.warn("KnownValue(");
value.dump();
std.debug.warn(")");
},
@@ -46,10 +49,18 @@ pub const Instruction = struct {
scope: *Scope,
debug_id: usize,
val: IrVal,
+ ref_count: usize,
+ span: Span,
/// true if this instruction was generated by zig and not from user code
is_generated: bool,
+ /// the instruction that is derived from this one in analysis
+ child: ?*Instruction,
+
+ /// the instruction that this one derives from in analysis
+ parent: ?*Instruction,
+
pub fn cast(base: *Instruction, comptime T: type) ?*T {
if (base.id == comptime typeToId(T)) {
return @fieldParentPtr(T, "base", base);
@@ -81,6 +92,47 @@ pub const Instruction = struct {
unreachable;
}
+ pub fn hasSideEffects(base: *const Instruction) bool {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Instruction, @memberName(Id, i));
+ return @fieldParentPtr(T, "base", base).hasSideEffects();
+ }
+ }
+ unreachable;
+ }
+
+ pub fn analyze(base: *Instruction, ira: *Analyze) Analyze.Error!*Instruction {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Instruction, @memberName(Id, i));
+ const new_inst = try @fieldParentPtr(T, "base", base).analyze(ira);
+ new_inst.linkToParent(base);
+ return new_inst;
+ }
+ }
+ unreachable;
+ }
+
+ fn getAsParam(param: *Instruction) !*Instruction {
+ const child = param.child orelse return error.SemanticAnalysisFailed;
+ switch (child.val) {
+ IrVal.Unknown => return error.SemanticAnalysisFailed,
+ else => return child,
+ }
+ }
+
+ /// asserts that the type is known
+ fn getKnownType(self: *Instruction) *Type {
+ switch (self.val) {
+ IrVal.KnownType => |typeof| return typeof,
+ IrVal.KnownValue => |value| return value.typeof,
+ IrVal.Unknown => unreachable,
+ }
+ }
+
pub fn setGenerated(base: *Instruction) void {
base.is_generated = true;
}
@@ -88,10 +140,18 @@ pub const Instruction = struct {
pub fn isNoReturn(base: *const Instruction) bool {
switch (base.val) {
IrVal.Unknown => return false,
- IrVal.Known => |x| return x.typeof.id == Type.Id.NoReturn,
+ IrVal.KnownValue => |x| return x.typeof.id == Type.Id.NoReturn,
+ IrVal.KnownType => |typeof| return typeof.id == Type.Id.NoReturn,
}
}
+ pub fn linkToParent(self: *Instruction, parent: *Instruction) void {
+ assert(self.parent == null);
+ assert(parent.child == null);
+ self.parent = parent;
+ parent.child = self;
+ }
+
pub const Id = enum {
Return,
Const,
@@ -100,196 +160,231 @@ pub const Instruction = struct {
CheckVoidStmt,
Phi,
Br,
+ AddImplicitReturnType,
};
pub const Const = struct {
base: Instruction,
+ params: Params,
- pub fn buildBool(irb: *Builder, scope: *Scope, val: bool) !*Instruction {
- const inst = try irb.arena().create(Const{
- .base = Instruction{
- .id = Instruction.Id.Const,
- .is_generated = false,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal{ .Known = &Value.Bool.get(irb.module, val).base },
- },
- });
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
- }
-
- pub fn buildVoid(irb: *Builder, scope: *Scope, is_generated: bool) !*Instruction {
- const inst = try irb.arena().create(Const{
- .base = Instruction{
- .id = Instruction.Id.Const,
- .is_generated = is_generated,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal{ .Known = &Value.Void.get(irb.module).base },
- },
- });
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
+ const Params = struct {};
+
+ // Use Builder.buildConst* methods, or, after building a Const instruction,
+ // manually set the ir_val field.
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(self: *const Const) void {
+ self.base.val.KnownValue.dump();
}
- pub fn dump(inst: *const Const) void {
- inst.base.val.Known.dump();
+ pub fn hasSideEffects(self: *const Const) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const Const, ira: *Analyze) !*Instruction {
+ const new_inst = try ira.irb.build(Const, self.base.scope, self.base.span, Params{});
+ new_inst.val = IrVal{ .KnownValue = self.base.val.KnownValue.getRef() };
+ return new_inst;
}
};
pub const Return = struct {
base: Instruction,
- return_value: *Instruction,
-
- pub fn build(irb: *Builder, scope: *Scope, return_value: *Instruction) !*Instruction {
- const inst = try irb.arena().create(Return{
- .base = Instruction{
- .id = Instruction.Id.Return,
- .is_generated = false,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal{ .Known = &Value.Void.get(irb.module).base },
- },
- .return_value = return_value,
- });
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
+ params: Params,
+
+ const Params = struct {
+ return_value: *Instruction,
+ };
+
+ const ir_val_init = IrVal.Init.NoReturn;
+
+ pub fn dump(self: *const Return) void {
+ std.debug.warn("#{}", self.params.return_value.debug_id);
}
- pub fn dump(inst: *const Return) void {
- std.debug.warn("#{}", inst.return_value.debug_id);
+ pub fn hasSideEffects(self: *const Return) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Return, ira: *Analyze) !*Instruction {
+ const value = try self.params.return_value.getAsParam();
+ const casted_value = try ira.implicitCast(value, ira.explicit_return_type);
+
+ // TODO detect returning local variable address
+
+ return ira.irb.build(Return, self.base.scope, self.base.span, Params{ .return_value = casted_value });
}
};
pub const Ref = struct {
base: Instruction,
- target: *Instruction,
- mut: Mut,
- volatility: Volatility,
+ params: Params,
- pub fn build(
- irb: *Builder,
- scope: *Scope,
+ const Params = struct {
target: *Instruction,
- mut: Mut,
- volatility: Volatility,
- ) !*Instruction {
- const inst = try irb.arena().create(Ref{
- .base = Instruction{
- .id = Instruction.Id.Ref,
- .is_generated = false,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal.Unknown,
- },
+ mut: Type.Pointer.Mut,
+ volatility: Type.Pointer.Vol,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const Ref) void {}
+
+ pub fn hasSideEffects(inst: *const Ref) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const Ref, ira: *Analyze) !*Instruction {
+ const target = try self.params.target.getAsParam();
+
+ if (ira.getCompTimeValOrNullUndefOk(target)) |val| {
+ return ira.getCompTimeRef(
+ val,
+ Value.Ptr.Mut.CompTimeConst,
+ self.params.mut,
+ self.params.volatility,
+ val.typeof.getAbiAlignment(ira.irb.module),
+ );
+ }
+
+ const new_inst = try ira.irb.build(Ref, self.base.scope, self.base.span, Params{
.target = target,
- .mut = mut,
- .volatility = volatility,
+ .mut = self.params.mut,
+ .volatility = self.params.volatility,
});
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
+ const elem_type = target.getKnownType();
+ const ptr_type = Type.Pointer.get(
+ ira.irb.module,
+ elem_type,
+ self.params.mut,
+ self.params.volatility,
+ Type.Pointer.Size.One,
+ elem_type.getAbiAlignment(ira.irb.module),
+ );
+ // TODO: potentially set the hint that this is a stack pointer. But it might not be - this
+ // could be a ref of a global, for example
+ new_inst.val = IrVal{ .KnownType = &ptr_type.base };
+ // TODO potentially add an alloca entry here
+ return new_inst;
}
-
- pub fn dump(inst: *const Ref) void {}
};
pub const DeclVar = struct {
base: Instruction,
- variable: *Variable,
+ params: Params,
+
+ const Params = struct {
+ variable: *Variable,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
pub fn dump(inst: *const DeclVar) void {}
+
+ pub fn hasSideEffects(inst: *const DeclVar) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const DeclVar, ira: *Analyze) !*Instruction {
+ return error.Unimplemented; // TODO
+ }
};
pub const CheckVoidStmt = struct {
base: Instruction,
- target: *Instruction,
+ params: Params,
- pub fn build(
- irb: *Builder,
- scope: *Scope,
+ const Params = struct {
target: *Instruction,
- ) !*Instruction {
- const inst = try irb.arena().create(CheckVoidStmt{
- .base = Instruction{
- .id = Instruction.Id.CheckVoidStmt,
- .is_generated = true,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal{ .Known = &Value.Void.get(irb.module).base },
- },
- .target = target,
- });
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
- }
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
pub fn dump(inst: *const CheckVoidStmt) void {}
+
+ pub fn hasSideEffects(inst: *const CheckVoidStmt) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const CheckVoidStmt, ira: *Analyze) !*Instruction {
+ return error.Unimplemented; // TODO
+ }
};
pub const Phi = struct {
base: Instruction,
- incoming_blocks: []*BasicBlock,
- incoming_values: []*Instruction,
+ params: Params,
- pub fn build(
- irb: *Builder,
- scope: *Scope,
+ const Params = struct {
incoming_blocks: []*BasicBlock,
incoming_values: []*Instruction,
- ) !*Instruction {
- const inst = try irb.arena().create(Phi{
- .base = Instruction{
- .id = Instruction.Id.Phi,
- .is_generated = false,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal.Unknown,
- },
- .incoming_blocks = incoming_blocks,
- .incoming_values = incoming_values,
- });
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
- }
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
pub fn dump(inst: *const Phi) void {}
+
+ pub fn hasSideEffects(inst: *const Phi) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const Phi, ira: *Analyze) !*Instruction {
+ return error.Unimplemented; // TODO
+ }
};
pub const Br = struct {
base: Instruction,
- dest_block: *BasicBlock,
- is_comptime: *Instruction,
+ params: Params,
- pub fn build(
- irb: *Builder,
- scope: *Scope,
+ const Params = struct {
dest_block: *BasicBlock,
is_comptime: *Instruction,
- ) !*Instruction {
- const inst = try irb.arena().create(Br{
- .base = Instruction{
- .id = Instruction.Id.Br,
- .is_generated = false,
- .scope = scope,
- .debug_id = irb.next_debug_id,
- .val = IrVal{ .Known = &Value.NoReturn.get(irb.module).base },
- },
- .dest_block = dest_block,
- .is_comptime = is_comptime,
- });
- irb.next_debug_id += 1;
- try irb.current_basic_block.instruction_list.append(&inst.base);
- return &inst.base;
- }
+ };
+
+ const ir_val_init = IrVal.Init.NoReturn;
pub fn dump(inst: *const Br) void {}
+
+ pub fn hasSideEffects(inst: *const Br) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Br, ira: *Analyze) !*Instruction {
+ return error.Unimplemented; // TODO
+ }
+ };
+
+ pub const AddImplicitReturnType = struct {
+ base: Instruction,
+ params: Params,
+
+ pub const Params = struct {
+ target: *Instruction,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const AddImplicitReturnType) void {
+ std.debug.warn("#{}", inst.params.target.debug_id);
+ }
+
+ pub fn hasSideEffects(inst: *const AddImplicitReturnType) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const AddImplicitReturnType, ira: *Analyze) !*Instruction {
+ const target = try self.params.target.getAsParam();
+
+ try ira.src_implicit_return_type_list.append(target);
+
+ return ira.irb.build(
+ AddImplicitReturnType,
+ self.base.scope,
+ self.base.span,
+ Params{ .target = target },
+ );
+ }
};
};
@@ -303,16 +398,31 @@ pub const BasicBlock = struct {
debug_id: usize,
scope: *Scope,
instruction_list: std.ArrayList(*Instruction),
+ ref_instruction: ?*Instruction,
+
+ /// the basic block that is derived from this one in analysis
+ child: ?*BasicBlock,
+
+ /// the basic block that this one derives from in analysis
+ parent: ?*BasicBlock,
pub fn ref(self: *BasicBlock) void {
self.ref_count += 1;
}
+
+ pub fn linkToParent(self: *BasicBlock, parent: *BasicBlock) void {
+ assert(self.parent == null);
+ assert(parent.child == null);
+ self.parent = parent;
+ parent.child = self;
+ }
};
/// Stuff that survives longer than Builder
pub const Code = struct {
basic_block_list: std.ArrayList(*BasicBlock),
arena: std.heap.ArenaAllocator,
+ return_type: ?*Type,
/// allocator is module.a()
pub fn destroy(self: *Code, allocator: *Allocator) void {
@@ -341,15 +451,13 @@ pub const Builder = struct {
parsed_file: *ParsedFile,
is_comptime: bool,
- pub const Error = error{
- OutOfMemory,
- Unimplemented,
- };
+ pub const Error = Analyze.Error;
pub fn init(module: *Module, parsed_file: *ParsedFile) !Builder {
const code = try module.a().create(Code{
.basic_block_list = undefined,
.arena = std.heap.ArenaAllocator.init(module.a()),
+ .return_type = null,
});
code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
errdefer code.destroy(module.a());
@@ -381,6 +489,9 @@ pub const Builder = struct {
.debug_id = self.next_debug_id,
.scope = scope,
.instruction_list = std.ArrayList(*Instruction).init(self.arena()),
+ .child = null,
+ .parent = null,
+ .ref_instruction = null,
});
self.next_debug_id += 1;
return basic_block;
@@ -490,14 +601,18 @@ pub const Builder = struct {
if (block.statements.len == 0) {
// {}
- return Instruction.Const.buildVoid(irb, child_scope, false);
+ return irb.buildConstVoid(child_scope, Span.token(block.lbrace), false);
}
if (block.label) |label| {
block_scope.incoming_values = std.ArrayList(*Instruction).init(irb.arena());
block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena());
block_scope.end_block = try irb.createBasicBlock(parent_scope, "BlockEnd");
- block_scope.is_comptime = try Instruction.Const.buildBool(irb, parent_scope, irb.isCompTime(parent_scope));
+ block_scope.is_comptime = try irb.buildConstBool(
+ parent_scope,
+ Span.token(block.lbrace),
+ irb.isCompTime(parent_scope),
+ );
}
var is_continuation_unreachable = false;
@@ -530,10 +645,15 @@ pub const Builder = struct {
if (statement_value.cast(Instruction.DeclVar)) |decl_var| {
// variable declarations start a new scope
- child_scope = decl_var.variable.child_scope;
+ child_scope = decl_var.params.variable.child_scope;
} else if (!is_continuation_unreachable) {
// this statement's value must be void
- _ = Instruction.CheckVoidStmt.build(irb, child_scope, statement_value);
+                    _ = try irb.build(
+                        Instruction.CheckVoidStmt,
+                        child_scope,
+                        statement_value.span,
+                        Instruction.CheckVoidStmt.Params{ .target = statement_value },
+                    );
}
}
@@ -544,37 +664,34 @@ pub const Builder = struct {
}
try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
- return Instruction.Phi.build(
- irb,
- parent_scope,
- block_scope.incoming_blocks.toOwnedSlice(),
- block_scope.incoming_values.toOwnedSlice(),
- );
+ return irb.build(Instruction.Phi, parent_scope, Span.token(block.rbrace), Instruction.Phi.Params{
+ .incoming_blocks = block_scope.incoming_blocks.toOwnedSlice(),
+ .incoming_values = block_scope.incoming_values.toOwnedSlice(),
+ });
}
if (block.label) |label| {
try block_scope.incoming_blocks.append(irb.current_basic_block);
try block_scope.incoming_values.append(
- try Instruction.Const.buildVoid(irb, parent_scope, true),
+ try irb.buildConstVoid(parent_scope, Span.token(block.rbrace), true),
);
_ = try irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit);
- (try Instruction.Br.build(
- irb,
- parent_scope,
- block_scope.end_block,
- block_scope.is_comptime,
- )).setGenerated();
+
+ _ = try irb.buildGen(Instruction.Br, parent_scope, Span.token(block.rbrace), Instruction.Br.Params{
+ .dest_block = block_scope.end_block,
+ .is_comptime = block_scope.is_comptime,
+ });
+
try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
- return Instruction.Phi.build(
- irb,
- parent_scope,
- block_scope.incoming_blocks.toOwnedSlice(),
- block_scope.incoming_values.toOwnedSlice(),
- );
+
+ return irb.build(Instruction.Phi, parent_scope, Span.token(block.rbrace), Instruction.Phi.Params{
+ .incoming_blocks = block_scope.incoming_blocks.toOwnedSlice(),
+ .incoming_values = block_scope.incoming_values.toOwnedSlice(),
+ });
}
_ = try irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit);
- return try Instruction.Const.buildVoid(irb, child_scope, true);
+ return irb.buildConstVoid(child_scope, Span.token(block.rbrace), true);
}
fn genDefersForBlock(
@@ -603,7 +720,12 @@ pub const Builder = struct {
if (instruction.isNoReturn()) {
is_noreturn = true;
} else {
- _ = Instruction.CheckVoidStmt.build(irb, &defer_expr_scope.base, instruction);
+ _ = try irb.build(
+ Instruction.CheckVoidStmt,
+ &defer_expr_scope.base,
+ Span.token(defer_expr_scope.expr_node.lastToken()),
+ Instruction.CheckVoidStmt.Params{ .target = instruction },
+ );
}
}
},
@@ -626,7 +748,11 @@ pub const Builder = struct {
LVal.Ptr => {
// We needed a pointer to a value, but we got a value. So we create
// an instruction which just makes a const pointer of it.
- return Instruction.Ref.build(irb, scope, instruction, Mut.Const, Volatility.NonVolatile);
+ return irb.build(Instruction.Ref, scope, instruction.span, Instruction.Ref.Params{
+ .target = instruction,
+ .mut = Type.Pointer.Mut.Const,
+ .volatility = Type.Pointer.Vol.Non,
+ });
},
}
}
@@ -634,9 +760,218 @@ pub const Builder = struct {
fn arena(self: *Builder) *Allocator {
return &self.code.arena.allocator;
}
+
+ fn buildExtra(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ is_generated: bool,
+ ) !*Instruction {
+ const inst = try self.arena().create(I{
+ .base = Instruction{
+ .id = Instruction.typeToId(I),
+ .is_generated = is_generated,
+ .scope = scope,
+ .debug_id = self.next_debug_id,
+ .val = switch (I.ir_val_init) {
+ IrVal.Init.Unknown => IrVal.Unknown,
+ IrVal.Init.NoReturn => IrVal{ .KnownValue = &Value.NoReturn.get(self.module).base },
+ IrVal.Init.Void => IrVal{ .KnownValue = &Value.Void.get(self.module).base },
+ },
+ .ref_count = 0,
+ .span = span,
+ .child = null,
+ .parent = null,
+ },
+ .params = params,
+ });
+
+ // Look at the params and ref() other instructions
+ comptime var i = 0;
+ inline while (i < @memberCount(I.Params)) : (i += 1) {
+ const FieldType = comptime @typeOf(@field(I.Params(undefined), @memberName(I.Params, i)));
+ switch (FieldType) {
+ *Instruction => @field(inst.params, @memberName(I.Params, i)).ref_count += 1,
+ ?*Instruction => if (@field(inst.params, @memberName(I.Params, i))) |other| other.ref_count += 1,
+ else => {},
+ }
+ }
+
+ self.next_debug_id += 1;
+ try self.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
+ fn build(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ ) !*Instruction {
+ return self.buildExtra(I, scope, span, params, false);
+ }
+
+ fn buildGen(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ ) !*Instruction {
+ return self.buildExtra(I, scope, span, params, true);
+ }
+
+ fn buildConstBool(self: *Builder, scope: *Scope, span: Span, x: bool) !*Instruction {
+ const inst = try self.build(Instruction.Const, scope, span, Instruction.Const.Params{});
+ inst.val = IrVal{ .KnownValue = &Value.Bool.get(self.module, x).base };
+ return inst;
+ }
+
+ fn buildConstVoid(self: *Builder, scope: *Scope, span: Span, is_generated: bool) !*Instruction {
+ const inst = try self.buildExtra(Instruction.Const, scope, span, Instruction.Const.Params{}, is_generated);
+ inst.val = IrVal{ .KnownValue = &Value.Void.get(self.module).base };
+ return inst;
+ }
+};
+
+const Analyze = struct {
+ irb: Builder,
+ old_bb_index: usize,
+ const_predecessor_bb: ?*BasicBlock,
+ parent_basic_block: *BasicBlock,
+ instruction_index: usize,
+ src_implicit_return_type_list: std.ArrayList(*Instruction),
+ explicit_return_type: ?*Type,
+
+ pub const Error = error{
+ /// This is only for when we have already reported a compile error. It is the poison value.
+ SemanticAnalysisFailed,
+
+ /// This is a placeholder - it is useful to use instead of panicking but once the compiler is
+ /// done this error code will be removed.
+ Unimplemented,
+
+ OutOfMemory,
+ };
+
+ pub fn init(module: *Module, parsed_file: *ParsedFile, explicit_return_type: ?*Type) !Analyze {
+ var irb = try Builder.init(module, parsed_file);
+ errdefer irb.abort();
+
+ return Analyze{
+ .irb = irb,
+ .old_bb_index = 0,
+ .const_predecessor_bb = null,
+ .parent_basic_block = undefined, // initialized with startBasicBlock
+ .instruction_index = undefined, // initialized with startBasicBlock
+ .src_implicit_return_type_list = std.ArrayList(*Instruction).init(irb.arena()),
+ .explicit_return_type = explicit_return_type,
+ };
+ }
+
+ pub fn abort(self: *Analyze) void {
+ self.irb.abort();
+ }
+
+ pub fn getNewBasicBlock(self: *Analyze, old_bb: *BasicBlock, ref_old_instruction: ?*Instruction) !*BasicBlock {
+ if (old_bb.child) |child| {
+ if (ref_old_instruction == null or child.ref_instruction != ref_old_instruction)
+ return child;
+ }
+
+ const new_bb = try self.irb.createBasicBlock(old_bb.scope, old_bb.name_hint);
+ new_bb.linkToParent(old_bb);
+ new_bb.ref_instruction = ref_old_instruction;
+ return new_bb;
+ }
+
+ pub fn startBasicBlock(self: *Analyze, old_bb: *BasicBlock, const_predecessor_bb: ?*BasicBlock) void {
+ self.instruction_index = 0;
+ self.parent_basic_block = old_bb;
+ self.const_predecessor_bb = const_predecessor_bb;
+ }
+
+ pub fn finishBasicBlock(ira: *Analyze, old_code: *Code) !void {
+ try ira.irb.code.basic_block_list.append(ira.irb.current_basic_block);
+ ira.instruction_index += 1;
+
+ while (ira.instruction_index < ira.parent_basic_block.instruction_list.len) {
+ const next_instruction = ira.parent_basic_block.instruction_list.at(ira.instruction_index);
+
+ if (!next_instruction.is_generated) {
+ try ira.addCompileError(next_instruction.span, "unreachable code");
+ break;
+ }
+ ira.instruction_index += 1;
+ }
+
+ ira.old_bb_index += 1;
+
+ var need_repeat = true;
+ while (true) {
+ while (ira.old_bb_index < old_code.basic_block_list.len) {
+ const old_bb = old_code.basic_block_list.at(ira.old_bb_index);
+ const new_bb = old_bb.child orelse {
+ ira.old_bb_index += 1;
+ continue;
+ };
+ if (new_bb.instruction_list.len != 0) {
+ ira.old_bb_index += 1;
+ continue;
+ }
+ ira.irb.current_basic_block = new_bb;
+
+ ira.startBasicBlock(old_bb, null);
+ return;
+ }
+ if (!need_repeat)
+ return;
+ need_repeat = false;
+ ira.old_bb_index = 0;
+ continue;
+ }
+ }
+
+ fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
+ return self.irb.module.addCompileError(self.irb.parsed_file, span, fmt, args);
+ }
+
+ fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Instruction) Analyze.Error!*Type {
+ // TODO actual implementation
+ return &Type.Void.get(self.irb.module).base;
+ }
+
+ fn implicitCast(self: *Analyze, target: *Instruction, optional_dest_type: ?*Type) Analyze.Error!*Instruction {
+ const dest_type = optional_dest_type orelse return target;
+ @panic("TODO implicitCast");
+ }
+
+ fn getCompTimeValOrNullUndefOk(self: *Analyze, target: *Instruction) ?*Value {
+ @panic("TODO getCompTimeValOrNullUndefOk");
+ }
+
+ fn getCompTimeRef(
+ self: *Analyze,
+ value: *Value,
+ ptr_mut: Value.Ptr.Mut,
+ mut: Type.Pointer.Mut,
+ volatility: Type.Pointer.Vol,
+ ptr_align: u32,
+ ) Analyze.Error!*Instruction {
+ @panic("TODO getCompTimeRef");
+ }
};
-pub async fn gen(module: *Module, body_node: *ast.Node, scope: *Scope, parsed_file: *ParsedFile) !*Code {
+pub async fn gen(
+ module: *Module,
+ body_node: *ast.Node,
+ scope: *Scope,
+ end_span: Span,
+ parsed_file: *ParsedFile,
+) !*Code {
var irb = try Builder.init(module, parsed_file);
errdefer irb.abort();
@@ -646,8 +981,61 @@ pub async fn gen(module: *Module, body_node: *ast.Node, scope: *Scope, parsed_fi
const result = try irb.genNode(body_node, scope, LVal.None);
if (!result.isNoReturn()) {
- (try Instruction.Return.build(&irb, scope, result)).setGenerated();
+        _ = try irb.buildGen(
+            Instruction.AddImplicitReturnType,
+            scope,
+            end_span,
+            Instruction.AddImplicitReturnType.Params{ .target = result },
+        );
+        _ = try irb.buildGen(
+            Instruction.Return,
+            scope,
+            end_span,
+            Instruction.Return.Params{ .return_value = result },
+        );
}
return irb.finish();
}
+
+pub async fn analyze(module: *Module, parsed_file: *ParsedFile, old_code: *Code, expected_type: ?*Type) !*Code {
+ var ira = try Analyze.init(module, parsed_file, expected_type);
+ errdefer ira.abort();
+
+ const old_entry_bb = old_code.basic_block_list.at(0);
+
+ const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);
+ new_entry_bb.ref();
+
+ ira.irb.current_basic_block = new_entry_bb;
+
+ ira.startBasicBlock(old_entry_bb, null);
+
+ while (ira.old_bb_index < old_code.basic_block_list.len) {
+ const old_instruction = ira.parent_basic_block.instruction_list.at(ira.instruction_index);
+
+ if (old_instruction.ref_count == 0 and !old_instruction.hasSideEffects()) {
+ ira.instruction_index += 1;
+ continue;
+ }
+
+ const return_inst = try old_instruction.analyze(&ira);
+ // Note: if we ever modify the above to handle error.CompileError by continuing analysis,
+ // then here we want to check if ira.isCompTime() and return early if true
+
+ if (return_inst.isNoReturn()) {
+ try ira.finishBasicBlock(old_code);
+ continue;
+ }
+
+ ira.instruction_index += 1;
+ }
+
+ if (ira.src_implicit_return_type_list.len == 0) {
+ ira.irb.code.return_type = &Type.NoReturn.get(module).base;
+ return ira.irb.finish();
+ }
+
+ ira.irb.code.return_type = try ira.resolvePeerTypes(expected_type, ira.src_implicit_return_type_list.toSliceConst());
+ return ira.irb.finish();
+}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index d7ead0ba32..058459a2d8 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -497,7 +497,7 @@ async fn processBuildEvents(module: *Module, color: errmsg.Color) void {
},
Module.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err));
- @panic("TODO error return trace");
+ os.exit(1);
},
Module.Event.Fail => |msgs| {
for (msgs) |msg| {
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index e74c84e02c..3fbe6d54ad 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -24,6 +24,7 @@ const Visib = @import("visib.zig").Visib;
const ParsedFile = @import("parsed_file.zig").ParsedFile;
const Value = @import("value.zig").Value;
const Type = Value.Type;
+const Span = errmsg.Span;
pub const Module = struct {
loop: *event.Loop,
@@ -148,13 +149,14 @@ pub const Module = struct {
Overflow,
NotSupported,
BufferTooSmall,
- Unimplemented,
+ Unimplemented, // TODO remove this one
+ SemanticAnalysisFailed, // TODO remove this one
};
pub const Event = union(enum) {
Ok,
- Fail: []*errmsg.Msg,
Error: BuildError,
+ Fail: []*errmsg.Msg,
};
pub const DarwinVersionMin = union(enum) {
@@ -413,21 +415,32 @@ pub const Module = struct {
while (true) {
// TODO directly awaiting async should guarantee memory allocation elision
// TODO also async before suspending should guarantee memory allocation elision
- (await (async self.addRootSrc() catch unreachable)) catch |err| {
- await (async self.events.put(Event{ .Error = err }) catch unreachable);
- return;
- };
+ const build_result = await (async self.addRootSrc() catch unreachable);
+
+ // this makes a handy error return trace and stack trace in debug mode
+ if (std.debug.runtime_safety) {
+ build_result catch unreachable;
+ }
+
const compile_errors = blk: {
const held = await (async self.compile_errors.acquire() catch unreachable);
defer held.release();
break :blk held.value.toOwnedSlice();
};
- if (compile_errors.len == 0) {
- await (async self.events.put(Event.Ok) catch unreachable);
- } else {
- await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
+ if (build_result) |_| {
+ if (compile_errors.len == 0) {
+ await (async self.events.put(Event.Ok) catch unreachable);
+ } else {
+ await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
+ }
+ } else |err| {
+ // if there's an error then the compile errors have dangling references
+ self.a().free(compile_errors);
+
+ await (async self.events.put(Event{ .Error = err }) catch unreachable);
}
+
// for now we stop after 1
return;
}
@@ -477,7 +490,7 @@ pub const Module = struct {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
- try self.addCompileError(parsed_file, errmsg.Span{
+ try self.addCompileError(parsed_file, Span{
.first = fn_proto.fn_token,
.last = fn_proto.fn_token + 1,
}, "missing function name");
@@ -518,27 +531,23 @@ pub const Module = struct {
}
}
- fn addCompileError(self: *Module, parsed_file: *ParsedFile, span: errmsg.Span, comptime fmt: []const u8, args: ...) !void {
+ fn addCompileError(self: *Module, parsed_file: *ParsedFile, span: Span, comptime fmt: []const u8, args: ...) !void {
const text = try std.fmt.allocPrint(self.loop.allocator, fmt, args);
errdefer self.loop.allocator.free(text);
- try self.build_group.call(addCompileErrorAsync, self, parsed_file, span.first, span.last, text);
+ try self.build_group.call(addCompileErrorAsync, self, parsed_file, span, text);
}
async fn addCompileErrorAsync(
self: *Module,
parsed_file: *ParsedFile,
- first_token: ast.TokenIndex,
- last_token: ast.TokenIndex,
+ span: Span,
text: []u8,
) !void {
const msg = try self.loop.allocator.create(errmsg.Msg{
.path = parsed_file.realpath,
.text = text,
- .span = errmsg.Span{
- .first = first_token,
- .last = last_token,
- },
+ .span = span,
.tree = &parsed_file.tree,
});
errdefer self.loop.allocator.destroy(msg);
@@ -624,6 +633,7 @@ pub async fn resolveDecl(module: *Module, decl: *Decl) !void {
if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
decl.resolution.data = await (async generateDecl(module, decl) catch unreachable);
decl.resolution.resolve();
+ return decl.resolution.data;
} else {
return (await (async decl.resolution.get() catch unreachable)).*;
}
@@ -655,12 +665,41 @@ async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void {
fn_decl.value = Decl.Fn.Val{ .Ok = fn_val };
- const code = try await (async ir.gen(
+ const unanalyzed_code = (await (async ir.gen(
module,
body_node,
&fndef_scope.base,
+ Span.token(body_node.lastToken()),
+ fn_decl.base.parsed_file,
+ ) catch unreachable)) catch |err| switch (err) {
+ // This poison value should not cause the errdefers to run. It simply means
+ // that self.compile_errors is populated.
+ error.SemanticAnalysisFailed => return {},
+ else => return err,
+ };
+ defer unanalyzed_code.destroy(module.a());
+
+ if (module.verbose_ir) {
+ std.debug.warn("unanalyzed:\n");
+ unanalyzed_code.dump();
+ }
+
+ const analyzed_code = (await (async ir.analyze(
+ module,
fn_decl.base.parsed_file,
- ) catch unreachable);
- //code.dump();
- //try await (async irAnalyze(module, func) catch unreachable);
+ unanalyzed_code,
+ null,
+ ) catch unreachable)) catch |err| switch (err) {
+ // This poison value should not cause the errdefers to run. It simply means
+ // that self.compile_errors is populated.
+ error.SemanticAnalysisFailed => return {},
+ else => return err,
+ };
+ defer analyzed_code.destroy(module.a());
+
+ if (module.verbose_ir) {
+ std.debug.warn("analyzed:\n");
+ analyzed_code.dump();
+ }
+ // TODO now render to LLVM module
}
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 4b3918854d..66e1470cc0 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -39,6 +39,14 @@ pub const Type = struct {
}
}
+ pub fn dump(base: *const Type) void {
+ std.debug.warn("{}", @tagName(base.id));
+ }
+
+ pub fn getAbiAlignment(base: *Type, module: *Module) u32 {
+ @panic("TODO getAbiAlignment");
+ }
+
pub const Struct = struct {
base: Type,
decls: *Scope.Decls,
@@ -143,10 +151,35 @@ pub const Type = struct {
};
pub const Pointer = struct {
base: Type,
+ mut: Mut,
+ vol: Vol,
+ size: Size,
+ alignment: u32,
+
+ pub const Mut = enum {
+ Mut,
+ Const,
+ };
+ pub const Vol = enum {
+ Non,
+ Volatile,
+ };
+ pub const Size = builtin.TypeInfo.Pointer.Size;
pub fn destroy(self: *Pointer, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn get(
+ module: *Module,
+ elem_type: *Type,
+ mut: Mut,
+ vol: Vol,
+ size: Size,
+ alignment: u32,
+ ) *Pointer {
+ @panic("TODO get pointer");
+ }
};
pub const Array = struct {
base: Type,
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index b53d03d0ad..7ee594b41c 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -24,10 +24,16 @@ pub const Value = struct {
Id.Void => @fieldParentPtr(Void, "base", base).destroy(module),
Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(module),
Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module),
+ Id.Ptr => @fieldParentPtr(Ptr, "base", base).destroy(module),
}
}
}
+ pub fn getRef(base: *Value) *Value {
+ base.ref();
+ return base;
+ }
+
pub fn dump(base: *const Value) void {
std.debug.warn("{}", @tagName(base.id));
}
@@ -38,6 +44,7 @@ pub const Value = struct {
Void,
Bool,
NoReturn,
+ Ptr,
};
pub const Type = @import("type.zig").Type;
@@ -122,4 +129,18 @@ pub const Value = struct {
module.a().destroy(self);
}
};
+
+ pub const Ptr = struct {
+ base: Value,
+
+ pub const Mut = enum {
+ CompTimeConst,
+ CompTimeVar,
+ RunTime,
+ };
+
+ pub fn destroy(self: *Ptr, module: *Module) void {
+ module.a().destroy(self);
+ }
+ };
};
diff --git a/std/event/loop.zig b/std/event/loop.zig
index 07575cf2e8..ba75109a72 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -382,6 +382,21 @@ pub const Loop = struct {
return async S.asyncFunc(self, &handle, args);
}
+ /// Awaiting a yield lets the event loop run, starting any unstarted async operations.
+ /// Note that async operations automatically start when a function yields for any other reason,
+ /// for example, when async I/O is performed. This function is intended to be used only when
+ /// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
+ /// is performed.
+    pub async fn yield(self: *Loop) void {
+        suspend |p| {
+            var my_tick_node = Loop.NextTickNode{
+                .next = undefined,
+                .data = p,
+            };
+            self.onNextTick(&my_tick_node); // fix: was `loop.onNextTick` — `loop` is not in scope here
+        }
+    }
+
fn workerRun(self: *Loop) void {
start_over: while (true) {
if (@atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
--
cgit v1.2.3
From 278829fc2cc23e55b09915ce07ce1ec2dbf7e68b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 14 Jul 2018 15:45:15 -0400
Subject: self-hosted: adding a fn to an llvm module
---
src-self-hosted/codegen.zig | 61 ++++++++++++++++++
src-self-hosted/ir.zig | 9 +--
src-self-hosted/llvm.zig | 23 ++++++-
src-self-hosted/main.zig | 7 ++-
src-self-hosted/module.zig | 112 ++++++++++++++++++++++-----------
src-self-hosted/test.zig | 11 +++-
src-self-hosted/type.zig | 147 +++++++++++++++++++++++++++++++++++++++++++-
src-self-hosted/value.zig | 20 ++++--
std/atomic/int.zig | 18 ++++--
std/event/loop.zig | 1 -
10 files changed, 346 insertions(+), 63 deletions(-)
create mode 100644 src-self-hosted/codegen.zig
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
new file mode 100644
index 0000000000..df8f451856
--- /dev/null
+++ b/src-self-hosted/codegen.zig
@@ -0,0 +1,61 @@
+const std = @import("std");
+// TODO codegen pretends that Module is renamed to Build because I plan to
+// do that refactor at some point
+const Build = @import("module.zig").Module;
+// we go through llvm instead of c for 2 reasons:
+// 1. to avoid accidentally calling the non-thread-safe functions
+// 2. patch up some of the types to remove nullability
+const llvm = @import("llvm.zig");
+const ir = @import("ir.zig");
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const event = std.event;
+
+pub async fn renderToLlvm(build: *Build, fn_val: *Value.Fn, code: *ir.Code) !void {
+ fn_val.base.ref();
+ defer fn_val.base.deref(build);
+ defer code.destroy(build.a());
+
+ const llvm_handle = try build.event_loop_local.getAnyLlvmContext();
+ defer llvm_handle.release(build.event_loop_local);
+
+ const context = llvm_handle.node.data;
+
+ const module = llvm.ModuleCreateWithNameInContext(build.name.ptr(), context) orelse return error.OutOfMemory;
+ defer llvm.DisposeModule(module);
+
+ const builder = llvm.CreateBuilderInContext(context) orelse return error.OutOfMemory;
+ defer llvm.DisposeBuilder(builder);
+
+ var cunit = CompilationUnit{
+ .build = build,
+ .module = module,
+ .builder = builder,
+ .context = context,
+ .lock = event.Lock.init(build.loop),
+ };
+
+ try renderToLlvmModule(&cunit, fn_val, code);
+
+ if (build.verbose_llvm_ir) {
+ llvm.DumpModule(cunit.module);
+ }
+}
+
+pub const CompilationUnit = struct {
+ build: *Build,
+ module: llvm.ModuleRef,
+ builder: llvm.BuilderRef,
+ context: llvm.ContextRef,
+ lock: event.Lock,
+
+ fn a(self: *CompilationUnit) *std.mem.Allocator {
+ return self.build.a();
+ }
+};
+
+pub fn renderToLlvmModule(cunit: *CompilationUnit, fn_val: *Value.Fn, code: *ir.Code) !void {
+ // TODO audit more of codegen.cpp:fn_llvm_value and port more logic
+ const llvm_fn_type = try fn_val.base.typeof.getLlvmType(cunit);
+ const llvm_fn = llvm.AddFunction(cunit.module, fn_val.symbol_name.ptr(), llvm_fn_type);
+}
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 22161a0c27..f1c395a790 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -375,15 +375,8 @@ pub const Instruction = struct {
pub fn analyze(self: *const AddImplicitReturnType, ira: *Analyze) !*Instruction {
const target = try self.params.target.getAsParam();
-
try ira.src_implicit_return_type_list.append(target);
-
- return ira.irb.build(
- AddImplicitReturnType,
- self.base.scope,
- self.base.span,
- Params{ .target = target },
- );
+ return ira.irb.buildConstVoid(self.base.scope, self.base.span, true);
}
};
};
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 391a92cd63..b815f75b05 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -2,10 +2,27 @@ const builtin = @import("builtin");
const c = @import("c.zig");
const assert = @import("std").debug.assert;
-pub const ValueRef = removeNullability(c.LLVMValueRef);
-pub const ModuleRef = removeNullability(c.LLVMModuleRef);
-pub const ContextRef = removeNullability(c.LLVMContextRef);
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
+pub const ContextRef = removeNullability(c.LLVMContextRef);
+pub const ModuleRef = removeNullability(c.LLVMModuleRef);
+pub const ValueRef = removeNullability(c.LLVMValueRef);
+pub const TypeRef = removeNullability(c.LLVMTypeRef);
+
+pub const AddFunction = c.LLVMAddFunction;
+pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext;
+pub const DisposeBuilder = c.LLVMDisposeBuilder;
+pub const DisposeModule = c.LLVMDisposeModule;
+pub const DumpModule = c.LLVMDumpModule;
+pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext;
+pub const VoidTypeInContext = c.LLVMVoidTypeInContext;
+
+pub const FunctionType = LLVMFunctionType;
+extern fn LLVMFunctionType(
+ ReturnType: TypeRef,
+ ParamTypes: [*]TypeRef,
+ ParamCount: c_uint,
+ IsVarArg: c_int,
+) ?TypeRef;
fn removeNullability(comptime T: type) type {
comptime assert(@typeId(T) == builtin.TypeId.Optional);
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 972aaae9ac..77ec7f6d32 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -14,6 +14,7 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig");
const Args = arg.Args;
const Flag = arg.Flag;
+const EventLoopLocal = @import("module.zig").EventLoopLocal;
const Module = @import("module.zig").Module;
const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
@@ -386,9 +387,13 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
var loop: event.Loop = undefined;
try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var event_loop_local = EventLoopLocal.init(&loop);
+ defer event_loop_local.deinit();
var module = try Module.create(
- &loop,
+ &event_loop_local,
root_name,
root_source_file,
Target.Native,
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 3fbe6d54ad..617bd0d44a 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -25,14 +25,58 @@ const ParsedFile = @import("parsed_file.zig").ParsedFile;
const Value = @import("value.zig").Value;
const Type = Value.Type;
const Span = errmsg.Span;
+const codegen = @import("codegen.zig");
+
+/// Data that is local to the event loop.
+pub const EventLoopLocal = struct {
+ loop: *event.Loop,
+ llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
+
+ fn init(loop: *event.Loop) EventLoopLocal {
+ return EventLoopLocal{
+ .loop = loop,
+ .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
+ };
+ }
+
+ fn deinit(self: *EventLoopLocal) void {
+ while (self.llvm_handle_pool.pop()) |node| {
+ c.LLVMContextDispose(node.data);
+ self.loop.allocator.destroy(node);
+ }
+ }
+
+ /// Gets an exclusive handle on any LlvmContext.
+ /// Caller must release the handle when done.
+ pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
+ if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
+
+ const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
+ errdefer c.LLVMContextDispose(context_ref);
+
+ const node = try self.loop.allocator.create(std.atomic.Stack(llvm.ContextRef).Node{
+ .next = undefined,
+ .data = context_ref,
+ });
+ errdefer self.loop.allocator.destroy(node);
+
+ return LlvmHandle{ .node = node };
+ }
+};
+
+pub const LlvmHandle = struct {
+ node: *std.atomic.Stack(llvm.ContextRef).Node,
+
+ pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
+ event_loop_local.llvm_handle_pool.push(self.node);
+ }
+};
pub const Module = struct {
+ event_loop_local: *EventLoopLocal,
loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
- llvm_module: llvm.ModuleRef,
- context: llvm.ContextRef,
- builder: llvm.BuilderRef,
target: Target,
build_mode: builtin.Mode,
zig_lib_dir: []const u8,
@@ -187,7 +231,7 @@ pub const Module = struct {
};
pub fn create(
- loop: *event.Loop,
+ event_loop_local: *EventLoopLocal,
name: []const u8,
root_src_path: ?[]const u8,
target: *const Target,
@@ -196,29 +240,20 @@ pub const Module = struct {
zig_lib_dir: []const u8,
cache_dir: []const u8,
) !*Module {
+ const loop = event_loop_local.loop;
+
var name_buffer = try Buffer.init(loop.allocator, name);
errdefer name_buffer.deinit();
- const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
- errdefer c.LLVMContextDispose(context);
-
- const llvm_module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
- errdefer c.LLVMDisposeModule(llvm_module);
-
- const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
- errdefer c.LLVMDisposeBuilder(builder);
-
const events = try event.Channel(Event).create(loop, 0);
errdefer events.destroy();
const module = try loop.allocator.create(Module{
.loop = loop,
+ .event_loop_local = event_loop_local,
.events = events,
.name = name_buffer,
.root_src_path = root_src_path,
- .llvm_module = llvm_module,
- .context = context,
- .builder = builder,
.target = target.*,
.kind = kind,
.build_mode = build_mode,
@@ -290,7 +325,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Type,
.typeof = undefined,
- .ref_count = 3, // 3 because it references itself twice
+ .ref_count = std.atomic.Int(usize).init(3), // 3 because it references itself twice
},
.id = builtin.TypeId.Type,
},
@@ -305,7 +340,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Type,
.typeof = &Type.MetaType.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Void,
},
@@ -317,7 +352,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Type,
.typeof = &Type.MetaType.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.NoReturn,
},
@@ -329,7 +364,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Type,
.typeof = &Type.MetaType.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Bool,
},
@@ -340,7 +375,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Void,
.typeof = &Type.Void.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
});
errdefer module.a().destroy(module.void_value);
@@ -349,7 +384,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Bool,
.typeof = &Type.Bool.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.x = true,
});
@@ -359,7 +394,7 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.Bool,
.typeof = &Type.Bool.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.x = false,
});
@@ -369,16 +404,12 @@ pub const Module = struct {
.base = Value{
.id = Value.Id.NoReturn,
.typeof = &Type.NoReturn.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
});
errdefer module.a().destroy(module.noreturn_value);
}
- fn dump(self: *Module) void {
- c.LLVMDumpModule(self.module);
- }
-
pub fn destroy(self: *Module) void {
self.noreturn_value.base.deref(self);
self.void_value.base.deref(self);
@@ -389,9 +420,6 @@ pub const Module = struct {
self.meta_type.base.base.deref(self);
self.events.destroy();
- c.LLVMDisposeBuilder(self.builder);
- c.LLVMDisposeModule(self.llvm_module);
- c.LLVMContextDispose(self.context);
self.name.deinit();
self.a().destroy(self);
@@ -657,10 +685,19 @@ async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void {
const fndef_scope = try Scope.FnDef.create(module, fn_decl.base.parent_scope);
defer fndef_scope.base.deref(module);
- const fn_type = try Type.Fn.create(module);
+ // TODO actually look at the return type of the AST
+ const return_type = &Type.Void.get(module).base;
+ defer return_type.base.deref(module);
+
+ const is_var_args = false;
+ const params = ([*]Type.Fn.Param)(undefined)[0..0];
+ const fn_type = try Type.Fn.create(module, return_type, params, is_var_args);
defer fn_type.base.base.deref(module);
- const fn_val = try Value.Fn.create(module, fn_type, fndef_scope);
+ var symbol_name = try std.Buffer.init(module.a(), fn_decl.base.name);
+ errdefer symbol_name.deinit();
+
+ const fn_val = try Value.Fn.create(module, fn_type, fndef_scope, symbol_name);
defer fn_val.base.deref(module);
fn_decl.value = Decl.Fn.Val{ .Ok = fn_val };
@@ -674,6 +711,7 @@ async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void {
) catch unreachable)) catch |err| switch (err) {
// This poison value should not cause the errdefers to run. It simply means
// that self.compile_errors is populated.
+ // TODO https://github.com/ziglang/zig/issues/769
error.SemanticAnalysisFailed => return {},
else => return err,
};
@@ -692,14 +730,18 @@ async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void {
) catch unreachable)) catch |err| switch (err) {
// This poison value should not cause the errdefers to run. It simply means
// that self.compile_errors is populated.
+ // TODO https://github.com/ziglang/zig/issues/769
error.SemanticAnalysisFailed => return {},
else => return err,
};
- defer analyzed_code.destroy(module.a());
+ errdefer analyzed_code.destroy(module.a());
if (module.verbose_ir) {
std.debug.warn("analyzed:\n");
analyzed_code.dump();
}
- // TODO now render to LLVM module
+
+ // Kick off rendering to LLVM module, but it doesn't block the fn decl
+ // analysis from being complete.
+ try module.build_group.call(codegen.renderToLlvm, module, fn_val, analyzed_code);
}
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 4455352f95..e609eb2791 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -6,6 +6,7 @@ const Module = @import("module.zig").Module;
const introspect = @import("introspect.zig");
const assertOrPanic = std.debug.assertOrPanic;
const errmsg = @import("errmsg.zig");
+const EventLoopLocal = @import("module.zig").EventLoopLocal;
test "compile errors" {
var ctx: TestContext = undefined;
@@ -22,6 +23,7 @@ const allocator = std.heap.c_allocator;
pub const TestContext = struct {
loop: std.event.Loop,
+ event_loop_local: EventLoopLocal,
zig_lib_dir: []u8,
zig_cache_dir: []u8,
file_index: std.atomic.Int(usize),
@@ -34,6 +36,7 @@ pub const TestContext = struct {
self.* = TestContext{
.any_err = {},
.loop = undefined,
+ .event_loop_local = undefined,
.zig_lib_dir = undefined,
.zig_cache_dir = undefined,
.group = undefined,
@@ -43,6 +46,9 @@ pub const TestContext = struct {
try self.loop.initMultiThreaded(allocator);
errdefer self.loop.deinit();
+ self.event_loop_local = EventLoopLocal.init(&self.loop);
+ errdefer self.event_loop_local.deinit();
+
self.group = std.event.Group(error!void).init(&self.loop);
errdefer self.group.cancelAll();
@@ -60,6 +66,7 @@ pub const TestContext = struct {
std.os.deleteTree(allocator, tmp_dir_name) catch {};
allocator.free(self.zig_cache_dir);
allocator.free(self.zig_lib_dir);
+ self.event_loop_local.deinit();
self.loop.deinit();
}
@@ -83,7 +90,7 @@ pub const TestContext = struct {
msg: []const u8,
) !void {
var file_index_buf: [20]u8 = undefined;
- const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.next());
+ const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.incr());
const file1_path = try std.os.path.join(allocator, tmp_dir_name, file_index, file1);
if (std.os.path.dirname(file1_path)) |dirname| {
@@ -94,7 +101,7 @@ pub const TestContext = struct {
try std.io.writeFile(allocator, file1_path, source);
var module = try Module.create(
- &self.loop,
+ &self.event_loop_local,
"test",
file1_path,
Target.Native,
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 66e1470cc0..e4c31018a3 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -1,7 +1,10 @@
+const std = @import("std");
const builtin = @import("builtin");
const Scope = @import("scope.zig").Scope;
const Module = @import("module.zig").Module;
const Value = @import("value.zig").Value;
+const llvm = @import("llvm.zig");
+const CompilationUnit = @import("codegen.zig").CompilationUnit;
pub const Type = struct {
base: Value,
@@ -39,6 +42,36 @@ pub const Type = struct {
}
}
+ pub fn getLlvmType(base: *Type, cunit: *CompilationUnit) (error{OutOfMemory}!llvm.TypeRef) {
+ switch (base.id) {
+ Id.Struct => return @fieldParentPtr(Struct, "base", base).getLlvmType(cunit),
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmType(cunit),
+ Id.Type => unreachable,
+ Id.Void => unreachable,
+ Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmType(cunit),
+ Id.NoReturn => unreachable,
+ Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmType(cunit),
+ Id.Float => return @fieldParentPtr(Float, "base", base).getLlvmType(cunit),
+ Id.Pointer => return @fieldParentPtr(Pointer, "base", base).getLlvmType(cunit),
+ Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmType(cunit),
+ Id.ComptimeFloat => unreachable,
+ Id.ComptimeInt => unreachable,
+ Id.Undefined => unreachable,
+ Id.Null => unreachable,
+ Id.Optional => return @fieldParentPtr(Optional, "base", base).getLlvmType(cunit),
+ Id.ErrorUnion => return @fieldParentPtr(ErrorUnion, "base", base).getLlvmType(cunit),
+ Id.ErrorSet => return @fieldParentPtr(ErrorSet, "base", base).getLlvmType(cunit),
+ Id.Enum => return @fieldParentPtr(Enum, "base", base).getLlvmType(cunit),
+ Id.Union => return @fieldParentPtr(Union, "base", base).getLlvmType(cunit),
+ Id.Namespace => unreachable,
+ Id.Block => unreachable,
+ Id.BoundFn => return @fieldParentPtr(BoundFn, "base", base).getLlvmType(cunit),
+ Id.ArgTuple => unreachable,
+ Id.Opaque => return @fieldParentPtr(Opaque, "base", base).getLlvmType(cunit),
+ Id.Promise => return @fieldParentPtr(Promise, "base", base).getLlvmType(cunit),
+ }
+ }
+
pub fn dump(base: *const Type) void {
std.debug.warn("{}", @tagName(base.id));
}
@@ -54,27 +87,72 @@ pub const Type = struct {
pub fn destroy(self: *Struct, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Struct, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
pub const Fn = struct {
base: Type,
+ return_type: *Type,
+ params: []Param,
+ is_var_args: bool,
- pub fn create(module: *Module) !*Fn {
- return module.a().create(Fn{
+ pub const Param = struct {
+ is_noalias: bool,
+ typeof: *Type,
+ };
+
+ pub fn create(module: *Module, return_type: *Type, params: []Param, is_var_args: bool) !*Fn {
+ const result = try module.a().create(Fn{
.base = Type{
.base = Value{
.id = Value.Id.Type,
.typeof = &MetaType.get(module).base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Fn,
},
+ .return_type = return_type,
+ .params = params,
+ .is_var_args = is_var_args,
});
+ errdefer module.a().destroy(result);
+
+ result.return_type.base.ref();
+ for (result.params) |param| {
+ param.typeof.base.ref();
+ }
+ return result;
}
pub fn destroy(self: *Fn, module: *Module) void {
+ self.return_type.base.deref(module);
+ for (self.params) |param| {
+ param.typeof.base.deref(module);
+ }
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Fn, cunit: *CompilationUnit) !llvm.TypeRef {
+ const llvm_return_type = switch (self.return_type.id) {
+ Type.Id.Void => llvm.VoidTypeInContext(cunit.context) orelse return error.OutOfMemory,
+ else => try self.return_type.getLlvmType(cunit),
+ };
+ const llvm_param_types = try cunit.a().alloc(llvm.TypeRef, self.params.len);
+ defer cunit.a().free(llvm_param_types);
+ for (llvm_param_types) |*llvm_param_type, i| {
+ llvm_param_type.* = try self.params[i].typeof.getLlvmType(cunit);
+ }
+
+ return llvm.FunctionType(
+ llvm_return_type,
+ llvm_param_types.ptr,
+ @intCast(c_uint, llvm_param_types.len),
+ @boolToInt(self.is_var_args),
+ ) orelse error.OutOfMemory;
+ }
};
pub const MetaType = struct {
@@ -118,6 +196,10 @@ pub const Type = struct {
pub fn destroy(self: *Bool, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Bool, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
pub const NoReturn = struct {
@@ -140,6 +222,10 @@ pub const Type = struct {
pub fn destroy(self: *Int, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Int, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
pub const Float = struct {
@@ -148,6 +234,10 @@ pub const Type = struct {
pub fn destroy(self: *Float, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Float, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
pub const Pointer = struct {
base: Type,
@@ -180,14 +270,24 @@ pub const Type = struct {
) *Pointer {
@panic("TODO get pointer");
}
+
+ pub fn getLlvmType(self: *Pointer, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const Array = struct {
base: Type,
pub fn destroy(self: *Array, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Array, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const ComptimeFloat = struct {
base: Type,
@@ -195,6 +295,7 @@ pub const Type = struct {
module.a().destroy(self);
}
};
+
pub const ComptimeInt = struct {
base: Type,
@@ -202,6 +303,7 @@ pub const Type = struct {
module.a().destroy(self);
}
};
+
pub const Undefined = struct {
base: Type,
@@ -209,6 +311,7 @@ pub const Type = struct {
module.a().destroy(self);
}
};
+
pub const Null = struct {
base: Type,
@@ -216,41 +319,67 @@ pub const Type = struct {
module.a().destroy(self);
}
};
+
pub const Optional = struct {
base: Type,
pub fn destroy(self: *Optional, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Optional, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const ErrorUnion = struct {
base: Type,
pub fn destroy(self: *ErrorUnion, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *ErrorUnion, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const ErrorSet = struct {
base: Type,
pub fn destroy(self: *ErrorSet, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *ErrorSet, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const Enum = struct {
base: Type,
pub fn destroy(self: *Enum, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Enum, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const Union = struct {
base: Type,
pub fn destroy(self: *Union, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Union, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
+
pub const Namespace = struct {
base: Type,
@@ -273,6 +402,10 @@ pub const Type = struct {
pub fn destroy(self: *BoundFn, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *BoundFn, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
pub const ArgTuple = struct {
@@ -289,6 +422,10 @@ pub const Type = struct {
pub fn destroy(self: *Opaque, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Opaque, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
pub const Promise = struct {
@@ -297,5 +434,9 @@ pub const Type = struct {
pub fn destroy(self: *Promise, module: *Module) void {
module.a().destroy(self);
}
+
+ pub fn getLlvmType(self: *Promise, cunit: *CompilationUnit) llvm.TypeRef {
+ @panic("TODO");
+ }
};
};
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index 7ee594b41c..779e5c2e45 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -8,15 +8,16 @@ const Module = @import("module.zig").Module;
pub const Value = struct {
id: Id,
typeof: *Type,
- ref_count: usize,
+ ref_count: std.atomic.Int(usize),
+ /// Thread-safe
pub fn ref(base: *Value) void {
- base.ref_count += 1;
+ _ = base.ref_count.incr();
}
+ /// Thread-safe
pub fn deref(base: *Value, module: *Module) void {
- base.ref_count -= 1;
- if (base.ref_count == 0) {
+ if (base.ref_count.decr() == 1) {
base.typeof.base.deref(module);
switch (base.id) {
Id.Type => @fieldParentPtr(Type, "base", base).destroy(module),
@@ -52,6 +53,10 @@ pub const Value = struct {
pub const Fn = struct {
base: Value,
+ /// The main external name that is used in the .o file.
+ /// TODO https://github.com/ziglang/zig/issues/265
+ symbol_name: std.Buffer,
+
/// parent should be the top level decls or container decls
fndef_scope: *Scope.FnDef,
@@ -62,16 +67,18 @@ pub const Value = struct {
block_scope: *Scope.Block,
/// Creates a Fn value with 1 ref
- pub fn create(module: *Module, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef) !*Fn {
+ /// Takes ownership of symbol_name
+ pub fn create(module: *Module, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: std.Buffer) !*Fn {
const self = try module.a().create(Fn{
.base = Value{
.id = Value.Id.Fn,
.typeof = &fn_type.base,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.fndef_scope = fndef_scope,
.child_scope = &fndef_scope.base,
.block_scope = undefined,
+ .symbol_name = symbol_name,
});
fn_type.base.base.ref();
fndef_scope.fn_val = self;
@@ -81,6 +88,7 @@ pub const Value = struct {
pub fn destroy(self: *Fn, module: *Module) void {
self.fndef_scope.base.deref(module);
+ self.symbol_name.deinit();
module.a().destroy(self);
}
};
diff --git a/std/atomic/int.zig b/std/atomic/int.zig
index 7042bca78d..d51454c673 100644
--- a/std/atomic/int.zig
+++ b/std/atomic/int.zig
@@ -4,16 +4,26 @@ const AtomicOrder = builtin.AtomicOrder;
/// Thread-safe, lock-free integer
pub fn Int(comptime T: type) type {
return struct {
- value: T,
+ unprotected_value: T,
pub const Self = this;
pub fn init(init_val: T) Self {
- return Self{ .value = init_val };
+ return Self{ .unprotected_value = init_val };
}
- pub fn next(self: *Self) T {
- return @atomicRmw(T, &self.value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ /// Returns previous value
+ pub fn incr(self: *Self) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+
+ /// Returns previous value
+ pub fn decr(self: *Self) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ pub fn get(self: *Self) T {
+ return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
}
};
}
diff --git a/std/event/loop.zig b/std/event/loop.zig
index ba75109a72..fc927592b9 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -101,7 +101,6 @@ pub const Loop = struct {
errdefer self.deinitOsData();
}
- /// must call stop before deinit
pub fn deinit(self: *Loop) void {
self.deinitOsData();
self.allocator.free(self.extra_threads);
--
cgit v1.2.3
From 28c3d4809bc6d497ac81892bc7eb03b95d8c2b32 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 14 Jul 2018 16:12:41 -0400
Subject: rename Module to Compilation
and CompilationUnit to ObjectFile
---
src-self-hosted/codegen.zig | 42 ++-
src-self-hosted/compilation.zig | 747 ++++++++++++++++++++++++++++++++++++++++
src-self-hosted/decl.zig | 4 +-
src-self-hosted/ir.zig | 56 +--
src-self-hosted/main.zig | 114 +++---
src-self-hosted/module.zig | 747 ----------------------------------------
src-self-hosted/scope.zig | 72 ++--
src-self-hosted/test.zig | 26 +-
src-self-hosted/type.zig | 268 +++++++-------
src-self-hosted/value.zig | 66 ++--
10 files changed, 1070 insertions(+), 1072 deletions(-)
create mode 100644 src-self-hosted/compilation.zig
delete mode 100644 src-self-hosted/module.zig
(limited to 'src-self-hosted/module.zig')
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index df8f451856..a07485e74e 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -1,7 +1,5 @@
const std = @import("std");
-// TODO codegen pretends that Module is renamed to Build because I plan to
-// do that refactor at some point
-const Build = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
// we go through llvm instead of c for 2 reasons:
// 1. to avoid accidentally calling the non-thread-safe functions
// 2. patch up some of the types to remove nullability
@@ -11,51 +9,51 @@ const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const event = std.event;
-pub async fn renderToLlvm(build: *Build, fn_val: *Value.Fn, code: *ir.Code) !void {
+pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void {
fn_val.base.ref();
- defer fn_val.base.deref(build);
- defer code.destroy(build.a());
+ defer fn_val.base.deref(comp);
+ defer code.destroy(comp.a());
- const llvm_handle = try build.event_loop_local.getAnyLlvmContext();
- defer llvm_handle.release(build.event_loop_local);
+ const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
+ defer llvm_handle.release(comp.event_loop_local);
const context = llvm_handle.node.data;
- const module = llvm.ModuleCreateWithNameInContext(build.name.ptr(), context) orelse return error.OutOfMemory;
+ const module = llvm.ModuleCreateWithNameInContext(comp.name.ptr(), context) orelse return error.OutOfMemory;
defer llvm.DisposeModule(module);
const builder = llvm.CreateBuilderInContext(context) orelse return error.OutOfMemory;
defer llvm.DisposeBuilder(builder);
- var cunit = CompilationUnit{
- .build = build,
+ var ofile = ObjectFile{
+ .comp = comp,
.module = module,
.builder = builder,
.context = context,
- .lock = event.Lock.init(build.loop),
+ .lock = event.Lock.init(comp.loop),
};
- try renderToLlvmModule(&cunit, fn_val, code);
+ try renderToLlvmModule(&ofile, fn_val, code);
- if (build.verbose_llvm_ir) {
- llvm.DumpModule(cunit.module);
+ if (comp.verbose_llvm_ir) {
+ llvm.DumpModule(ofile.module);
}
}
-pub const CompilationUnit = struct {
- build: *Build,
+pub const ObjectFile = struct {
+ comp: *Compilation,
module: llvm.ModuleRef,
builder: llvm.BuilderRef,
context: llvm.ContextRef,
lock: event.Lock,
- fn a(self: *CompilationUnit) *std.mem.Allocator {
- return self.build.a();
+ fn a(self: *ObjectFile) *std.mem.Allocator {
+ return self.comp.a();
}
};
-pub fn renderToLlvmModule(cunit: *CompilationUnit, fn_val: *Value.Fn, code: *ir.Code) !void {
+pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) !void {
// TODO audit more of codegen.cpp:fn_llvm_value and port more logic
- const llvm_fn_type = try fn_val.base.typeof.getLlvmType(cunit);
- const llvm_fn = llvm.AddFunction(cunit.module, fn_val.symbol_name.ptr(), llvm_fn_type);
+ const llvm_fn_type = try fn_val.base.typeof.getLlvmType(ofile);
+ const llvm_fn = llvm.AddFunction(ofile.module, fn_val.symbol_name.ptr(), llvm_fn_type);
}
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
new file mode 100644
index 0000000000..cbda7861bc
--- /dev/null
+++ b/src-self-hosted/compilation.zig
@@ -0,0 +1,747 @@
+const std = @import("std");
+const os = std.os;
+const io = std.io;
+const mem = std.mem;
+const Allocator = mem.Allocator;
+const Buffer = std.Buffer;
+const llvm = @import("llvm.zig");
+const c = @import("c.zig");
+const builtin = @import("builtin");
+const Target = @import("target.zig").Target;
+const warn = std.debug.warn;
+const Token = std.zig.Token;
+const ArrayList = std.ArrayList;
+const errmsg = @import("errmsg.zig");
+const ast = std.zig.ast;
+const event = std.event;
+const assert = std.debug.assert;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Scope = @import("scope.zig").Scope;
+const Decl = @import("decl.zig").Decl;
+const ir = @import("ir.zig");
+const Visib = @import("visib.zig").Visib;
+const ParsedFile = @import("parsed_file.zig").ParsedFile;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
+const Span = errmsg.Span;
+const codegen = @import("codegen.zig");
+
+/// Data that is local to the event loop.
+pub const EventLoopLocal = struct {
+ loop: *event.Loop,
+ llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
+
+ fn init(loop: *event.Loop) EventLoopLocal {
+ return EventLoopLocal{
+ .loop = loop,
+ .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
+ };
+ }
+
+ fn deinit(self: *EventLoopLocal) void {
+ while (self.llvm_handle_pool.pop()) |node| {
+ c.LLVMContextDispose(node.data);
+ self.loop.allocator.destroy(node);
+ }
+ }
+
+ /// Gets an exclusive handle on any LlvmContext.
+ /// Caller must release the handle when done.
+ pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
+ if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
+
+ const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
+ errdefer c.LLVMContextDispose(context_ref);
+
+ const node = try self.loop.allocator.create(std.atomic.Stack(llvm.ContextRef).Node{
+ .next = undefined,
+ .data = context_ref,
+ });
+ errdefer self.loop.allocator.destroy(node);
+
+ return LlvmHandle{ .node = node };
+ }
+};
+
+pub const LlvmHandle = struct {
+ node: *std.atomic.Stack(llvm.ContextRef).Node,
+
+ pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
+ event_loop_local.llvm_handle_pool.push(self.node);
+ }
+};
+
+pub const Compilation = struct {
+ event_loop_local: *EventLoopLocal,
+ loop: *event.Loop,
+ name: Buffer,
+ root_src_path: ?[]const u8,
+ target: Target,
+ build_mode: builtin.Mode,
+ zig_lib_dir: []const u8,
+
+ version_major: u32,
+ version_minor: u32,
+ version_patch: u32,
+
+ linker_script: ?[]const u8,
+ cache_dir: []const u8,
+ libc_lib_dir: ?[]const u8,
+ libc_static_lib_dir: ?[]const u8,
+ libc_include_dir: ?[]const u8,
+ msvc_lib_dir: ?[]const u8,
+ kernel32_lib_dir: ?[]const u8,
+ dynamic_linker: ?[]const u8,
+ out_h_path: ?[]const u8,
+
+ is_test: bool,
+ each_lib_rpath: bool,
+ strip: bool,
+ is_static: bool,
+ linker_rdynamic: bool,
+
+ clang_argv: []const []const u8,
+ llvm_argv: []const []const u8,
+ lib_dirs: []const []const u8,
+ rpath_list: []const []const u8,
+ assembly_files: []const []const u8,
+ link_objects: []const []const u8,
+
+ windows_subsystem_windows: bool,
+ windows_subsystem_console: bool,
+
+ link_libs_list: ArrayList(*LinkLib),
+ libc_link_lib: ?*LinkLib,
+
+ err_color: errmsg.Color,
+
+ verbose_tokenize: bool,
+ verbose_ast_tree: bool,
+ verbose_ast_fmt: bool,
+ verbose_cimport: bool,
+ verbose_ir: bool,
+ verbose_llvm_ir: bool,
+ verbose_link: bool,
+
+ darwin_frameworks: []const []const u8,
+ darwin_version_min: DarwinVersionMin,
+
+ test_filters: []const []const u8,
+ test_name_prefix: ?[]const u8,
+
+ emit_file_type: Emit,
+
+ kind: Kind,
+
+ link_out_file: ?[]const u8,
+ events: *event.Channel(Event),
+
+ exported_symbol_names: event.Locked(Decl.Table),
+
+ /// Before code generation starts, must wait on this group to make sure
+ /// the build is complete.
+ build_group: event.Group(BuildError!void),
+
+ compile_errors: event.Locked(CompileErrList),
+
+ meta_type: *Type.MetaType,
+ void_type: *Type.Void,
+ bool_type: *Type.Bool,
+ noreturn_type: *Type.NoReturn,
+
+ void_value: *Value.Void,
+ true_value: *Value.Bool,
+ false_value: *Value.Bool,
+ noreturn_value: *Value.NoReturn,
+
+ const CompileErrList = std.ArrayList(*errmsg.Msg);
+
+ // TODO handle some of these earlier and report them in a way other than error codes
+ pub const BuildError = error{
+ OutOfMemory,
+ EndOfStream,
+ BadFd,
+ Io,
+ IsDir,
+ Unexpected,
+ SystemResources,
+ SharingViolation,
+ PathAlreadyExists,
+ FileNotFound,
+ AccessDenied,
+ PipeBusy,
+ FileTooBig,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ PathNotFound,
+ NoSpaceLeft,
+ NotDir,
+ FileSystem,
+ OperationAborted,
+ IoPending,
+ BrokenPipe,
+ WouldBlock,
+ FileClosed,
+ DestinationAddressRequired,
+ DiskQuota,
+ InputOutput,
+ NoStdHandles,
+ Overflow,
+ NotSupported,
+ BufferTooSmall,
+ Unimplemented, // TODO remove this one
+ SemanticAnalysisFailed, // TODO remove this one
+ };
+
+ pub const Event = union(enum) {
+ Ok,
+ Error: BuildError,
+ Fail: []*errmsg.Msg,
+ };
+
+ pub const DarwinVersionMin = union(enum) {
+ None,
+ MacOS: []const u8,
+ Ios: []const u8,
+ };
+
+ pub const Kind = enum {
+ Exe,
+ Lib,
+ Obj,
+ };
+
+ pub const LinkLib = struct {
+ name: []const u8,
+ path: ?[]const u8,
+
+ /// the list of symbols we depend on from this lib
+ symbols: ArrayList([]u8),
+ provided_explicitly: bool,
+ };
+
+ pub const Emit = enum {
+ Binary,
+ Assembly,
+ LlvmIr,
+ };
+
+ pub fn create(
+ event_loop_local: *EventLoopLocal,
+ name: []const u8,
+ root_src_path: ?[]const u8,
+ target: *const Target,
+ kind: Kind,
+ build_mode: builtin.Mode,
+ zig_lib_dir: []const u8,
+ cache_dir: []const u8,
+ ) !*Compilation {
+ const loop = event_loop_local.loop;
+
+ var name_buffer = try Buffer.init(loop.allocator, name);
+ errdefer name_buffer.deinit();
+
+ const events = try event.Channel(Event).create(loop, 0);
+ errdefer events.destroy();
+
+ const comp = try loop.allocator.create(Compilation{
+ .loop = loop,
+ .event_loop_local = event_loop_local,
+ .events = events,
+ .name = name_buffer,
+ .root_src_path = root_src_path,
+ .target = target.*,
+ .kind = kind,
+ .build_mode = build_mode,
+ .zig_lib_dir = zig_lib_dir,
+ .cache_dir = cache_dir,
+
+ .version_major = 0,
+ .version_minor = 0,
+ .version_patch = 0,
+
+ .verbose_tokenize = false,
+ .verbose_ast_tree = false,
+ .verbose_ast_fmt = false,
+ .verbose_cimport = false,
+ .verbose_ir = false,
+ .verbose_llvm_ir = false,
+ .verbose_link = false,
+
+ .linker_script = null,
+ .libc_lib_dir = null,
+ .libc_static_lib_dir = null,
+ .libc_include_dir = null,
+ .msvc_lib_dir = null,
+ .kernel32_lib_dir = null,
+ .dynamic_linker = null,
+ .out_h_path = null,
+ .is_test = false,
+ .each_lib_rpath = false,
+ .strip = false,
+ .is_static = false,
+ .linker_rdynamic = false,
+ .clang_argv = [][]const u8{},
+ .llvm_argv = [][]const u8{},
+ .lib_dirs = [][]const u8{},
+ .rpath_list = [][]const u8{},
+ .assembly_files = [][]const u8{},
+ .link_objects = [][]const u8{},
+ .windows_subsystem_windows = false,
+ .windows_subsystem_console = false,
+ .link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
+ .libc_link_lib = null,
+ .err_color = errmsg.Color.Auto,
+ .darwin_frameworks = [][]const u8{},
+ .darwin_version_min = DarwinVersionMin.None,
+ .test_filters = [][]const u8{},
+ .test_name_prefix = null,
+ .emit_file_type = Emit.Binary,
+ .link_out_file = null,
+ .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
+ .build_group = event.Group(BuildError!void).init(loop),
+ .compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
+
+ .meta_type = undefined,
+ .void_type = undefined,
+ .void_value = undefined,
+ .bool_type = undefined,
+ .true_value = undefined,
+ .false_value = undefined,
+ .noreturn_type = undefined,
+ .noreturn_value = undefined,
+ });
+ try comp.initTypes();
+ return comp;
+ }
+
+ fn initTypes(comp: *Compilation) !void {
+ comp.meta_type = try comp.a().create(Type.MetaType{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = undefined,
+ .ref_count = std.atomic.Int(usize).init(3), // 3 because it references itself twice
+ },
+ .id = builtin.TypeId.Type,
+ },
+ .value = undefined,
+ });
+ comp.meta_type.value = &comp.meta_type.base;
+ comp.meta_type.base.base.typeof = &comp.meta_type.base;
+ errdefer comp.a().destroy(comp.meta_type);
+
+ comp.void_type = try comp.a().create(Type.Void{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Void,
+ },
+ });
+ errdefer comp.a().destroy(comp.void_type);
+
+ comp.noreturn_type = try comp.a().create(Type.NoReturn{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.NoReturn,
+ },
+ });
+ errdefer comp.a().destroy(comp.noreturn_type);
+
+ comp.bool_type = try comp.a().create(Type.Bool{
+ .base = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typeof = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Bool,
+ },
+ });
+ errdefer comp.a().destroy(comp.bool_type);
+
+ comp.void_value = try comp.a().create(Value.Void{
+ .base = Value{
+ .id = Value.Id.Void,
+ .typeof = &Type.Void.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ });
+ errdefer comp.a().destroy(comp.void_value);
+
+ comp.true_value = try comp.a().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typeof = &Type.Bool.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .x = true,
+ });
+ errdefer comp.a().destroy(comp.true_value);
+
+ comp.false_value = try comp.a().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typeof = &Type.Bool.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .x = false,
+ });
+ errdefer comp.a().destroy(comp.false_value);
+
+ comp.noreturn_value = try comp.a().create(Value.NoReturn{
+ .base = Value{
+ .id = Value.Id.NoReturn,
+ .typeof = &Type.NoReturn.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ });
+ errdefer comp.a().destroy(comp.noreturn_value);
+ }
+
+ pub fn destroy(self: *Compilation) void {
+ self.noreturn_value.base.deref(self);
+ self.void_value.base.deref(self);
+ self.false_value.base.deref(self);
+ self.true_value.base.deref(self);
+ self.noreturn_type.base.base.deref(self);
+ self.void_type.base.base.deref(self);
+ self.meta_type.base.base.deref(self);
+
+ self.events.destroy();
+ self.name.deinit();
+
+ self.a().destroy(self);
+ }
+
+ pub fn build(self: *Compilation) !void {
+ if (self.llvm_argv.len != 0) {
+ var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
+ [][]const u8{"zig (LLVM option parsing)"},
+ self.llvm_argv,
+ });
+ defer c_compatible_args.deinit();
+ // TODO this sets global state
+ c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
+ }
+
+ _ = try async self.buildAsync();
+ }
+
+ async fn buildAsync(self: *Compilation) void {
+ while (true) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ // TODO also async before suspending should guarantee memory allocation elision
+ const build_result = await (async self.addRootSrc() catch unreachable);
+
+ // this makes a handy error return trace and stack trace in debug mode
+ if (std.debug.runtime_safety) {
+ build_result catch unreachable;
+ }
+
+ const compile_errors = blk: {
+ const held = await (async self.compile_errors.acquire() catch unreachable);
+ defer held.release();
+ break :blk held.value.toOwnedSlice();
+ };
+
+ if (build_result) |_| {
+ if (compile_errors.len == 0) {
+ await (async self.events.put(Event.Ok) catch unreachable);
+ } else {
+ await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
+ }
+ } else |err| {
+ // if there's an error then the compile errors have dangling references
+ self.a().free(compile_errors);
+
+ await (async self.events.put(Event{ .Error = err }) catch unreachable);
+ }
+
+ // for now we stop after 1
+ return;
+ }
+ }
+
+ async fn addRootSrc(self: *Compilation) !void {
+ const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
+ // TODO async/await os.path.real
+ const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
+ try printError("unable to get real path '{}': {}", root_src_path, err);
+ return err;
+ };
+ errdefer self.a().free(root_src_real_path);
+
+ // TODO async/await readFileAlloc()
+ const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
+ try printError("unable to open '{}': {}", root_src_real_path, err);
+ return err;
+ };
+ errdefer self.a().free(source_code);
+
+ const parsed_file = try self.a().create(ParsedFile{
+ .tree = undefined,
+ .realpath = root_src_real_path,
+ });
+ errdefer self.a().destroy(parsed_file);
+
+ parsed_file.tree = try std.zig.parse(self.a(), source_code);
+ errdefer parsed_file.tree.deinit();
+
+ const tree = &parsed_file.tree;
+
+ // create empty struct for it
+ const decls = try Scope.Decls.create(self, null);
+ defer decls.base.deref(self);
+
+ var decl_group = event.Group(BuildError!void).init(self.loop);
+ errdefer decl_group.cancelAll();
+
+ var it = tree.root_node.decls.iterator(0);
+ while (it.next()) |decl_ptr| {
+ const decl = decl_ptr.*;
+ switch (decl.id) {
+ ast.Node.Id.Comptime => @panic("TODO"),
+ ast.Node.Id.VarDecl => @panic("TODO"),
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+
+ const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
+ try self.addCompileError(parsed_file, Span{
+ .first = fn_proto.fn_token,
+ .last = fn_proto.fn_token + 1,
+ }, "missing function name");
+ continue;
+ };
+
+ const fn_decl = try self.a().create(Decl.Fn{
+ .base = Decl{
+ .id = Decl.Id.Fn,
+ .name = name,
+ .visib = parseVisibToken(tree, fn_proto.visib_token),
+ .resolution = event.Future(BuildError!void).init(self.loop),
+ .resolution_in_progress = 0,
+ .parsed_file = parsed_file,
+ .parent_scope = &decls.base,
+ },
+ .value = Decl.Fn.Val{ .Unresolved = {} },
+ .fn_proto = fn_proto,
+ });
+ errdefer self.a().destroy(fn_decl);
+
+ try decl_group.call(addTopLevelDecl, self, &fn_decl.base);
+ },
+ ast.Node.Id.TestDecl => @panic("TODO"),
+ else => unreachable,
+ }
+ }
+ try await (async decl_group.wait() catch unreachable);
+ try await (async self.build_group.wait() catch unreachable);
+ }
+
+ async fn addTopLevelDecl(self: *Compilation, decl: *Decl) !void {
+ const is_export = decl.isExported(&decl.parsed_file.tree);
+
+ if (is_export) {
+ try self.build_group.call(verifyUniqueSymbol, self, decl);
+ try self.build_group.call(resolveDecl, self, decl);
+ }
+ }
+
+ fn addCompileError(self: *Compilation, parsed_file: *ParsedFile, span: Span, comptime fmt: []const u8, args: ...) !void {
+ const text = try std.fmt.allocPrint(self.loop.allocator, fmt, args);
+ errdefer self.loop.allocator.free(text);
+
+ try self.build_group.call(addCompileErrorAsync, self, parsed_file, span, text);
+ }
+
+ async fn addCompileErrorAsync(
+ self: *Compilation,
+ parsed_file: *ParsedFile,
+ span: Span,
+ text: []u8,
+ ) !void {
+ const msg = try self.loop.allocator.create(errmsg.Msg{
+ .path = parsed_file.realpath,
+ .text = text,
+ .span = span,
+ .tree = &parsed_file.tree,
+ });
+ errdefer self.loop.allocator.destroy(msg);
+
+ const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
+ defer compile_errors.release();
+
+ try compile_errors.value.append(msg);
+ }
+
+ async fn verifyUniqueSymbol(self: *Compilation, decl: *Decl) !void {
+ const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
+ defer exported_symbol_names.release();
+
+ if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
+ try self.addCompileError(
+ decl.parsed_file,
+ decl.getSpan(),
+ "exported symbol collision: '{}'",
+ decl.name,
+ );
+ // TODO add error note showing location of other symbol
+ }
+ }
+
+ pub fn link(self: *Compilation, out_file: ?[]const u8) !void {
+ warn("TODO link");
+ return error.Todo;
+ }
+
+ pub fn addLinkLib(self: *Compilation, name: []const u8, provided_explicitly: bool) !*LinkLib {
+ const is_libc = mem.eql(u8, name, "c");
+
+ if (is_libc) {
+ if (self.libc_link_lib) |libc_link_lib| {
+ return libc_link_lib;
+ }
+ }
+
+ for (self.link_libs_list.toSliceConst()) |existing_lib| {
+ if (mem.eql(u8, name, existing_lib.name)) {
+ return existing_lib;
+ }
+ }
+
+ const link_lib = try self.a().create(LinkLib{
+ .name = name,
+ .path = null,
+ .provided_explicitly = provided_explicitly,
+ .symbols = ArrayList([]u8).init(self.a()),
+ });
+ try self.link_libs_list.append(link_lib);
+ if (is_libc) {
+ self.libc_link_lib = link_lib;
+ }
+ return link_lib;
+ }
+
+ fn a(self: Compilation) *mem.Allocator {
+ return self.loop.allocator;
+ }
+};
+
+fn printError(comptime format: []const u8, args: ...) !void {
+ var stderr_file = try std.io.getStdErr();
+ var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
+ const out_stream = &stderr_file_out_stream.stream;
+ try out_stream.print(format, args);
+}
+
+fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
+ if (optional_token_index) |token_index| {
+ const token = tree.tokens.at(token_index);
+ assert(token.id == Token.Id.Keyword_pub);
+ return Visib.Pub;
+ } else {
+ return Visib.Private;
+ }
+}
+
+/// This declaration has been blessed as going into the final code generation.
+pub async fn resolveDecl(comp: *Compilation, decl: *Decl) !void {
+ if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
+ decl.resolution.data = await (async generateDecl(comp, decl) catch unreachable);
+ decl.resolution.resolve();
+ return decl.resolution.data;
+ } else {
+ return (await (async decl.resolution.get() catch unreachable)).*;
+ }
+}
+
+/// The function that actually does the generation.
+async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
+ switch (decl.id) {
+ Decl.Id.Var => @panic("TODO"),
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
+ return await (async generateDeclFn(comp, fn_decl) catch unreachable);
+ },
+ Decl.Id.CompTime => @panic("TODO"),
+ }
+}
+
+async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+ const body_node = fn_decl.fn_proto.body_node orelse @panic("TODO extern fn proto decl");
+
+ const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
+ defer fndef_scope.base.deref(comp);
+
+ // TODO actually look at the return type of the AST
+ const return_type = &Type.Void.get(comp).base;
+ defer return_type.base.deref(comp);
+
+ const is_var_args = false;
+ const params = ([*]Type.Fn.Param)(undefined)[0..0];
+ const fn_type = try Type.Fn.create(comp, return_type, params, is_var_args);
+ defer fn_type.base.base.deref(comp);
+
+ var symbol_name = try std.Buffer.init(comp.a(), fn_decl.base.name);
+ errdefer symbol_name.deinit();
+
+ const fn_val = try Value.Fn.create(comp, fn_type, fndef_scope, symbol_name);
+ defer fn_val.base.deref(comp);
+
+ fn_decl.value = Decl.Fn.Val{ .Ok = fn_val };
+
+ const unanalyzed_code = (await (async ir.gen(
+ comp,
+ body_node,
+ &fndef_scope.base,
+ Span.token(body_node.lastToken()),
+ fn_decl.base.parsed_file,
+ ) catch unreachable)) catch |err| switch (err) {
+ // This poison value should not cause the errdefers to run. It simply means
+ // that self.compile_errors is populated.
+ // TODO https://github.com/ziglang/zig/issues/769
+ error.SemanticAnalysisFailed => return {},
+ else => return err,
+ };
+ defer unanalyzed_code.destroy(comp.a());
+
+ if (comp.verbose_ir) {
+ std.debug.warn("unanalyzed:\n");
+ unanalyzed_code.dump();
+ }
+
+ const analyzed_code = (await (async ir.analyze(
+ comp,
+ fn_decl.base.parsed_file,
+ unanalyzed_code,
+ null,
+ ) catch unreachable)) catch |err| switch (err) {
+ // This poison value should not cause the errdefers to run. It simply means
+ // that self.compile_errors is populated.
+ // TODO https://github.com/ziglang/zig/issues/769
+ error.SemanticAnalysisFailed => return {},
+ else => return err,
+ };
+ errdefer analyzed_code.destroy(comp.a());
+
+ if (comp.verbose_ir) {
+ std.debug.warn("analyzed:\n");
+ analyzed_code.dump();
+ }
+
+ // Kick off rendering to LLVM comp, but it doesn't block the fn decl
+ // analysis from being complete.
+ try comp.build_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code);
+}
diff --git a/src-self-hosted/decl.zig b/src-self-hosted/decl.zig
index 1a75a3249e..c0173266ee 100644
--- a/src-self-hosted/decl.zig
+++ b/src-self-hosted/decl.zig
@@ -9,13 +9,13 @@ const Value = @import("value.zig").Value;
const Token = std.zig.Token;
const errmsg = @import("errmsg.zig");
const Scope = @import("scope.zig").Scope;
-const Module = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
pub const Decl = struct {
id: Id,
name: []const u8,
visib: Visib,
- resolution: event.Future(Module.BuildError!void),
+ resolution: event.Future(Compilation.BuildError!void),
resolution_in_progress: u8,
parsed_file: *ParsedFile,
parent_scope: *Scope,
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index f1c395a790..22d5a067a7 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -1,6 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const Module = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
const Scope = @import("scope.zig").Scope;
const ast = std.zig.ast;
const Allocator = std.mem.Allocator;
@@ -243,7 +243,7 @@ pub const Instruction = struct {
Value.Ptr.Mut.CompTimeConst,
self.params.mut,
self.params.volatility,
- val.typeof.getAbiAlignment(ira.irb.module),
+ val.typeof.getAbiAlignment(ira.irb.comp),
);
}
@@ -254,12 +254,12 @@ pub const Instruction = struct {
});
const elem_type = target.getKnownType();
const ptr_type = Type.Pointer.get(
- ira.irb.module,
+ ira.irb.comp,
elem_type,
self.params.mut,
self.params.volatility,
Type.Pointer.Size.One,
- elem_type.getAbiAlignment(ira.irb.module),
+ elem_type.getAbiAlignment(ira.irb.comp),
);
// TODO: potentially set the hint that this is a stack pointer. But it might not be - this
// could be a ref of a global, for example
@@ -417,7 +417,7 @@ pub const Code = struct {
arena: std.heap.ArenaAllocator,
return_type: ?*Type,
- /// allocator is module.a()
+ /// allocator is comp.a()
pub fn destroy(self: *Code, allocator: *Allocator) void {
self.arena.deinit();
allocator.destroy(self);
@@ -437,7 +437,7 @@ pub const Code = struct {
};
pub const Builder = struct {
- module: *Module,
+ comp: *Compilation,
code: *Code,
current_basic_block: *BasicBlock,
next_debug_id: usize,
@@ -446,17 +446,17 @@ pub const Builder = struct {
pub const Error = Analyze.Error;
- pub fn init(module: *Module, parsed_file: *ParsedFile) !Builder {
- const code = try module.a().create(Code{
+ pub fn init(comp: *Compilation, parsed_file: *ParsedFile) !Builder {
+ const code = try comp.a().create(Code{
.basic_block_list = undefined,
- .arena = std.heap.ArenaAllocator.init(module.a()),
+ .arena = std.heap.ArenaAllocator.init(comp.a()),
.return_type = null,
});
code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
- errdefer code.destroy(module.a());
+ errdefer code.destroy(comp.a());
return Builder{
- .module = module,
+ .comp = comp,
.parsed_file = parsed_file,
.current_basic_block = undefined,
.code = code,
@@ -466,7 +466,7 @@ pub const Builder = struct {
}
pub fn abort(self: *Builder) void {
- self.code.destroy(self.module.a());
+ self.code.destroy(self.comp.a());
}
/// Call code.destroy() when done
@@ -581,7 +581,7 @@ pub const Builder = struct {
}
pub fn genBlock(irb: *Builder, block: *ast.Node.Block, parent_scope: *Scope) !*Instruction {
- const block_scope = try Scope.Block.create(irb.module, parent_scope);
+ const block_scope = try Scope.Block.create(irb.comp, parent_scope);
const outer_block_scope = &block_scope.base;
var child_scope = outer_block_scope;
@@ -623,8 +623,8 @@ pub const Builder = struct {
Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
else => unreachable,
};
- const defer_expr_scope = try Scope.DeferExpr.create(irb.module, parent_scope, defer_node.expr);
- const defer_child_scope = try Scope.Defer.create(irb.module, parent_scope, kind, defer_expr_scope);
+ const defer_expr_scope = try Scope.DeferExpr.create(irb.comp, parent_scope, defer_node.expr);
+ const defer_child_scope = try Scope.Defer.create(irb.comp, parent_scope, kind, defer_expr_scope);
child_scope = &defer_child_scope.base;
continue;
}
@@ -770,8 +770,8 @@ pub const Builder = struct {
.debug_id = self.next_debug_id,
.val = switch (I.ir_val_init) {
IrVal.Init.Unknown => IrVal.Unknown,
- IrVal.Init.NoReturn => IrVal{ .KnownValue = &Value.NoReturn.get(self.module).base },
- IrVal.Init.Void => IrVal{ .KnownValue = &Value.Void.get(self.module).base },
+ IrVal.Init.NoReturn => IrVal{ .KnownValue = &Value.NoReturn.get(self.comp).base },
+ IrVal.Init.Void => IrVal{ .KnownValue = &Value.Void.get(self.comp).base },
},
.ref_count = 0,
.span = span,
@@ -819,13 +819,13 @@ pub const Builder = struct {
fn buildConstBool(self: *Builder, scope: *Scope, span: Span, x: bool) !*Instruction {
const inst = try self.build(Instruction.Const, scope, span, Instruction.Const.Params{});
- inst.val = IrVal{ .KnownValue = &Value.Bool.get(self.module, x).base };
+ inst.val = IrVal{ .KnownValue = &Value.Bool.get(self.comp, x).base };
return inst;
}
fn buildConstVoid(self: *Builder, scope: *Scope, span: Span, is_generated: bool) !*Instruction {
const inst = try self.buildExtra(Instruction.Const, scope, span, Instruction.Const.Params{}, is_generated);
- inst.val = IrVal{ .KnownValue = &Value.Void.get(self.module).base };
+ inst.val = IrVal{ .KnownValue = &Value.Void.get(self.comp).base };
return inst;
}
};
@@ -850,8 +850,8 @@ const Analyze = struct {
OutOfMemory,
};
- pub fn init(module: *Module, parsed_file: *ParsedFile, explicit_return_type: ?*Type) !Analyze {
- var irb = try Builder.init(module, parsed_file);
+ pub fn init(comp: *Compilation, parsed_file: *ParsedFile, explicit_return_type: ?*Type) !Analyze {
+ var irb = try Builder.init(comp, parsed_file);
errdefer irb.abort();
return Analyze{
@@ -929,12 +929,12 @@ const Analyze = struct {
}
fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
- return self.irb.module.addCompileError(self.irb.parsed_file, span, fmt, args);
+ return self.irb.comp.addCompileError(self.irb.parsed_file, span, fmt, args);
}
fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Instruction) Analyze.Error!*Type {
// TODO actual implementation
- return &Type.Void.get(self.irb.module).base;
+ return &Type.Void.get(self.irb.comp).base;
}
fn implicitCast(self: *Analyze, target: *Instruction, optional_dest_type: ?*Type) Analyze.Error!*Instruction {
@@ -959,13 +959,13 @@ const Analyze = struct {
};
pub async fn gen(
- module: *Module,
+ comp: *Compilation,
body_node: *ast.Node,
scope: *Scope,
end_span: Span,
parsed_file: *ParsedFile,
) !*Code {
- var irb = try Builder.init(module, parsed_file);
+ var irb = try Builder.init(comp, parsed_file);
errdefer irb.abort();
const entry_block = try irb.createBasicBlock(scope, "Entry");
@@ -991,8 +991,8 @@ pub async fn gen(
return irb.finish();
}
-pub async fn analyze(module: *Module, parsed_file: *ParsedFile, old_code: *Code, expected_type: ?*Type) !*Code {
- var ira = try Analyze.init(module, parsed_file, expected_type);
+pub async fn analyze(comp: *Compilation, parsed_file: *ParsedFile, old_code: *Code, expected_type: ?*Type) !*Code {
+ var ira = try Analyze.init(comp, parsed_file, expected_type);
errdefer ira.abort();
const old_entry_bb = old_code.basic_block_list.at(0);
@@ -1025,7 +1025,7 @@ pub async fn analyze(module: *Module, parsed_file: *ParsedFile, old_code: *Code,
}
if (ira.src_implicit_return_type_list.len == 0) {
- ira.irb.code.return_type = &Type.NoReturn.get(module).base;
+ ira.irb.code.return_type = &Type.NoReturn.get(comp).base;
return ira.irb.finish();
}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 77ec7f6d32..c9478954c5 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -14,8 +14,8 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig");
const Args = arg.Args;
const Flag = arg.Flag;
-const EventLoopLocal = @import("module.zig").EventLoopLocal;
-const Module = @import("module.zig").Module;
+const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
@@ -258,7 +258,7 @@ const args_build_generic = []Flag{
Flag.Arg1("--ver-patch"),
};
-fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Module.Kind) !void {
+fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Compilation.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
@@ -300,14 +300,14 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const emit_type = blk: {
if (flags.single("emit")) |emit_flag| {
if (mem.eql(u8, emit_flag, "asm")) {
- break :blk Module.Emit.Assembly;
+ break :blk Compilation.Emit.Assembly;
} else if (mem.eql(u8, emit_flag, "bin")) {
- break :blk Module.Emit.Binary;
+ break :blk Compilation.Emit.Binary;
} else if (mem.eql(u8, emit_flag, "llvm-ir")) {
- break :blk Module.Emit.LlvmIr;
+ break :blk Compilation.Emit.LlvmIr;
} else unreachable;
} else {
- break :blk Module.Emit.Binary;
+ break :blk Compilation.Emit.Binary;
}
};
@@ -370,7 +370,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
- if (out_type == Module.Kind.Obj and link_objects.len != 0) {
+ if (out_type == Compilation.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
@@ -392,7 +392,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
var event_loop_local = EventLoopLocal.init(&loop);
defer event_loop_local.deinit();
- var module = try Module.create(
+ var comp = try Compilation.create(
&event_loop_local,
root_name,
root_source_file,
@@ -402,16 +402,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
zig_lib_dir,
full_cache_dir,
);
- defer module.destroy();
+ defer comp.destroy();
- module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
- module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
- module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
+ comp.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
+ comp.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
+ comp.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
- module.is_test = false;
+ comp.is_test = false;
- module.linker_script = flags.single("linker-script");
- module.each_lib_rpath = flags.present("each-lib-rpath");
+ comp.linker_script = flags.single("linker-script");
+ comp.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
@@ -422,51 +422,51 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
try clang_argv_buf.append(mllvm);
}
- module.llvm_argv = mllvm_flags;
- module.clang_argv = clang_argv_buf.toSliceConst();
+ comp.llvm_argv = mllvm_flags;
+ comp.clang_argv = clang_argv_buf.toSliceConst();
- module.strip = flags.present("strip");
- module.is_static = flags.present("static");
+ comp.strip = flags.present("strip");
+ comp.is_static = flags.present("static");
if (flags.single("libc-lib-dir")) |libc_lib_dir| {
- module.libc_lib_dir = libc_lib_dir;
+ comp.libc_lib_dir = libc_lib_dir;
}
if (flags.single("libc-static-lib-dir")) |libc_static_lib_dir| {
- module.libc_static_lib_dir = libc_static_lib_dir;
+ comp.libc_static_lib_dir = libc_static_lib_dir;
}
if (flags.single("libc-include-dir")) |libc_include_dir| {
- module.libc_include_dir = libc_include_dir;
+ comp.libc_include_dir = libc_include_dir;
}
if (flags.single("msvc-lib-dir")) |msvc_lib_dir| {
- module.msvc_lib_dir = msvc_lib_dir;
+ comp.msvc_lib_dir = msvc_lib_dir;
}
if (flags.single("kernel32-lib-dir")) |kernel32_lib_dir| {
- module.kernel32_lib_dir = kernel32_lib_dir;
+ comp.kernel32_lib_dir = kernel32_lib_dir;
}
if (flags.single("dynamic-linker")) |dynamic_linker| {
- module.dynamic_linker = dynamic_linker;
+ comp.dynamic_linker = dynamic_linker;
}
- module.verbose_tokenize = flags.present("verbose-tokenize");
- module.verbose_ast_tree = flags.present("verbose-ast-tree");
- module.verbose_ast_fmt = flags.present("verbose-ast-fmt");
- module.verbose_link = flags.present("verbose-link");
- module.verbose_ir = flags.present("verbose-ir");
- module.verbose_llvm_ir = flags.present("verbose-llvm-ir");
- module.verbose_cimport = flags.present("verbose-cimport");
+ comp.verbose_tokenize = flags.present("verbose-tokenize");
+ comp.verbose_ast_tree = flags.present("verbose-ast-tree");
+ comp.verbose_ast_fmt = flags.present("verbose-ast-fmt");
+ comp.verbose_link = flags.present("verbose-link");
+ comp.verbose_ir = flags.present("verbose-ir");
+ comp.verbose_llvm_ir = flags.present("verbose-llvm-ir");
+ comp.verbose_cimport = flags.present("verbose-cimport");
- module.err_color = color;
- module.lib_dirs = flags.many("library-path");
- module.darwin_frameworks = flags.many("framework");
- module.rpath_list = flags.many("rpath");
+ comp.err_color = color;
+ comp.lib_dirs = flags.many("library-path");
+ comp.darwin_frameworks = flags.many("framework");
+ comp.rpath_list = flags.many("rpath");
if (flags.single("output-h")) |output_h| {
- module.out_h_path = output_h;
+ comp.out_h_path = output_h;
}
- module.windows_subsystem_windows = flags.present("mwindows");
- module.windows_subsystem_console = flags.present("mconsole");
- module.linker_rdynamic = flags.present("rdynamic");
+ comp.windows_subsystem_windows = flags.present("mwindows");
+ comp.windows_subsystem_console = flags.present("mconsole");
+ comp.linker_rdynamic = flags.present("rdynamic");
if (flags.single("mmacosx-version-min") != null and flags.single("mios-version-min") != null) {
try stderr.write("-mmacosx-version-min and -mios-version-min options not allowed together\n");
@@ -474,37 +474,37 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
if (flags.single("mmacosx-version-min")) |ver| {
- module.darwin_version_min = Module.DarwinVersionMin{ .MacOS = ver };
+ comp.darwin_version_min = Compilation.DarwinVersionMin{ .MacOS = ver };
}
if (flags.single("mios-version-min")) |ver| {
- module.darwin_version_min = Module.DarwinVersionMin{ .Ios = ver };
+ comp.darwin_version_min = Compilation.DarwinVersionMin{ .Ios = ver };
}
- module.emit_file_type = emit_type;
- module.link_objects = link_objects;
- module.assembly_files = assembly_files;
- module.link_out_file = flags.single("out-file");
+ comp.emit_file_type = emit_type;
+ comp.link_objects = link_objects;
+ comp.assembly_files = assembly_files;
+ comp.link_out_file = flags.single("out-file");
- try module.build();
- const process_build_events_handle = try async processBuildEvents(module, color);
+ try comp.build();
+ const process_build_events_handle = try async processBuildEvents(comp, color);
defer cancel process_build_events_handle;
loop.run();
}
-async fn processBuildEvents(module: *Module, color: errmsg.Color) void {
+async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
// TODO directly awaiting async should guarantee memory allocation elision
- const build_event = await (async module.events.get() catch unreachable);
+ const build_event = await (async comp.events.get() catch unreachable);
switch (build_event) {
- Module.Event.Ok => {
+ Compilation.Event.Ok => {
std.debug.warn("Build succeeded\n");
return;
},
- Module.Event.Error => |err| {
+ Compilation.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err));
os.exit(1);
},
- Module.Event.Fail => |msgs| {
+ Compilation.Event.Fail => |msgs| {
for (msgs) |msg| {
errmsg.printToFile(&stderr_file, msg, color) catch os.exit(1);
}
@@ -513,15 +513,15 @@ async fn processBuildEvents(module: *Module, color: errmsg.Color) void {
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
- return buildOutputType(allocator, args, Module.Kind.Exe);
+ return buildOutputType(allocator, args, Compilation.Kind.Exe);
}
fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
- return buildOutputType(allocator, args, Module.Kind.Lib);
+ return buildOutputType(allocator, args, Compilation.Kind.Lib);
}
fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
- return buildOutputType(allocator, args, Module.Kind.Obj);
+ return buildOutputType(allocator, args, Compilation.Kind.Obj);
}
const usage_fmt =
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
deleted file mode 100644
index 617bd0d44a..0000000000
--- a/src-self-hosted/module.zig
+++ /dev/null
@@ -1,747 +0,0 @@
-const std = @import("std");
-const os = std.os;
-const io = std.io;
-const mem = std.mem;
-const Allocator = mem.Allocator;
-const Buffer = std.Buffer;
-const llvm = @import("llvm.zig");
-const c = @import("c.zig");
-const builtin = @import("builtin");
-const Target = @import("target.zig").Target;
-const warn = std.debug.warn;
-const Token = std.zig.Token;
-const ArrayList = std.ArrayList;
-const errmsg = @import("errmsg.zig");
-const ast = std.zig.ast;
-const event = std.event;
-const assert = std.debug.assert;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
-const Scope = @import("scope.zig").Scope;
-const Decl = @import("decl.zig").Decl;
-const ir = @import("ir.zig");
-const Visib = @import("visib.zig").Visib;
-const ParsedFile = @import("parsed_file.zig").ParsedFile;
-const Value = @import("value.zig").Value;
-const Type = Value.Type;
-const Span = errmsg.Span;
-const codegen = @import("codegen.zig");
-
-/// Data that is local to the event loop.
-pub const EventLoopLocal = struct {
- loop: *event.Loop,
- llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
-
- fn init(loop: *event.Loop) EventLoopLocal {
- return EventLoopLocal{
- .loop = loop,
- .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
- };
- }
-
- fn deinit(self: *EventLoopLocal) void {
- while (self.llvm_handle_pool.pop()) |node| {
- c.LLVMContextDispose(node.data);
- self.loop.allocator.destroy(node);
- }
- }
-
- /// Gets an exclusive handle on any LlvmContext.
- /// Caller must release the handle when done.
- pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
- if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
-
- const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
- errdefer c.LLVMContextDispose(context_ref);
-
- const node = try self.loop.allocator.create(std.atomic.Stack(llvm.ContextRef).Node{
- .next = undefined,
- .data = context_ref,
- });
- errdefer self.loop.allocator.destroy(node);
-
- return LlvmHandle{ .node = node };
- }
-};
-
-pub const LlvmHandle = struct {
- node: *std.atomic.Stack(llvm.ContextRef).Node,
-
- pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
- event_loop_local.llvm_handle_pool.push(self.node);
- }
-};
-
-pub const Module = struct {
- event_loop_local: *EventLoopLocal,
- loop: *event.Loop,
- name: Buffer,
- root_src_path: ?[]const u8,
- target: Target,
- build_mode: builtin.Mode,
- zig_lib_dir: []const u8,
-
- version_major: u32,
- version_minor: u32,
- version_patch: u32,
-
- linker_script: ?[]const u8,
- cache_dir: []const u8,
- libc_lib_dir: ?[]const u8,
- libc_static_lib_dir: ?[]const u8,
- libc_include_dir: ?[]const u8,
- msvc_lib_dir: ?[]const u8,
- kernel32_lib_dir: ?[]const u8,
- dynamic_linker: ?[]const u8,
- out_h_path: ?[]const u8,
-
- is_test: bool,
- each_lib_rpath: bool,
- strip: bool,
- is_static: bool,
- linker_rdynamic: bool,
-
- clang_argv: []const []const u8,
- llvm_argv: []const []const u8,
- lib_dirs: []const []const u8,
- rpath_list: []const []const u8,
- assembly_files: []const []const u8,
- link_objects: []const []const u8,
-
- windows_subsystem_windows: bool,
- windows_subsystem_console: bool,
-
- link_libs_list: ArrayList(*LinkLib),
- libc_link_lib: ?*LinkLib,
-
- err_color: errmsg.Color,
-
- verbose_tokenize: bool,
- verbose_ast_tree: bool,
- verbose_ast_fmt: bool,
- verbose_cimport: bool,
- verbose_ir: bool,
- verbose_llvm_ir: bool,
- verbose_link: bool,
-
- darwin_frameworks: []const []const u8,
- darwin_version_min: DarwinVersionMin,
-
- test_filters: []const []const u8,
- test_name_prefix: ?[]const u8,
-
- emit_file_type: Emit,
-
- kind: Kind,
-
- link_out_file: ?[]const u8,
- events: *event.Channel(Event),
-
- exported_symbol_names: event.Locked(Decl.Table),
-
- /// Before code generation starts, must wait on this group to make sure
- /// the build is complete.
- build_group: event.Group(BuildError!void),
-
- compile_errors: event.Locked(CompileErrList),
-
- meta_type: *Type.MetaType,
- void_type: *Type.Void,
- bool_type: *Type.Bool,
- noreturn_type: *Type.NoReturn,
-
- void_value: *Value.Void,
- true_value: *Value.Bool,
- false_value: *Value.Bool,
- noreturn_value: *Value.NoReturn,
-
- const CompileErrList = std.ArrayList(*errmsg.Msg);
-
- // TODO handle some of these earlier and report them in a way other than error codes
- pub const BuildError = error{
- OutOfMemory,
- EndOfStream,
- BadFd,
- Io,
- IsDir,
- Unexpected,
- SystemResources,
- SharingViolation,
- PathAlreadyExists,
- FileNotFound,
- AccessDenied,
- PipeBusy,
- FileTooBig,
- SymLinkLoop,
- ProcessFdQuotaExceeded,
- NameTooLong,
- SystemFdQuotaExceeded,
- NoDevice,
- PathNotFound,
- NoSpaceLeft,
- NotDir,
- FileSystem,
- OperationAborted,
- IoPending,
- BrokenPipe,
- WouldBlock,
- FileClosed,
- DestinationAddressRequired,
- DiskQuota,
- InputOutput,
- NoStdHandles,
- Overflow,
- NotSupported,
- BufferTooSmall,
- Unimplemented, // TODO remove this one
- SemanticAnalysisFailed, // TODO remove this one
- };
-
- pub const Event = union(enum) {
- Ok,
- Error: BuildError,
- Fail: []*errmsg.Msg,
- };
-
- pub const DarwinVersionMin = union(enum) {
- None,
- MacOS: []const u8,
- Ios: []const u8,
- };
-
- pub const Kind = enum {
- Exe,
- Lib,
- Obj,
- };
-
- pub const LinkLib = struct {
- name: []const u8,
- path: ?[]const u8,
-
- /// the list of symbols we depend on from this lib
- symbols: ArrayList([]u8),
- provided_explicitly: bool,
- };
-
- pub const Emit = enum {
- Binary,
- Assembly,
- LlvmIr,
- };
-
- pub fn create(
- event_loop_local: *EventLoopLocal,
- name: []const u8,
- root_src_path: ?[]const u8,
- target: *const Target,
- kind: Kind,
- build_mode: builtin.Mode,
- zig_lib_dir: []const u8,
- cache_dir: []const u8,
- ) !*Module {
- const loop = event_loop_local.loop;
-
- var name_buffer = try Buffer.init(loop.allocator, name);
- errdefer name_buffer.deinit();
-
- const events = try event.Channel(Event).create(loop, 0);
- errdefer events.destroy();
-
- const module = try loop.allocator.create(Module{
- .loop = loop,
- .event_loop_local = event_loop_local,
- .events = events,
- .name = name_buffer,
- .root_src_path = root_src_path,
- .target = target.*,
- .kind = kind,
- .build_mode = build_mode,
- .zig_lib_dir = zig_lib_dir,
- .cache_dir = cache_dir,
-
- .version_major = 0,
- .version_minor = 0,
- .version_patch = 0,
-
- .verbose_tokenize = false,
- .verbose_ast_tree = false,
- .verbose_ast_fmt = false,
- .verbose_cimport = false,
- .verbose_ir = false,
- .verbose_llvm_ir = false,
- .verbose_link = false,
-
- .linker_script = null,
- .libc_lib_dir = null,
- .libc_static_lib_dir = null,
- .libc_include_dir = null,
- .msvc_lib_dir = null,
- .kernel32_lib_dir = null,
- .dynamic_linker = null,
- .out_h_path = null,
- .is_test = false,
- .each_lib_rpath = false,
- .strip = false,
- .is_static = false,
- .linker_rdynamic = false,
- .clang_argv = [][]const u8{},
- .llvm_argv = [][]const u8{},
- .lib_dirs = [][]const u8{},
- .rpath_list = [][]const u8{},
- .assembly_files = [][]const u8{},
- .link_objects = [][]const u8{},
- .windows_subsystem_windows = false,
- .windows_subsystem_console = false,
- .link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
- .libc_link_lib = null,
- .err_color = errmsg.Color.Auto,
- .darwin_frameworks = [][]const u8{},
- .darwin_version_min = DarwinVersionMin.None,
- .test_filters = [][]const u8{},
- .test_name_prefix = null,
- .emit_file_type = Emit.Binary,
- .link_out_file = null,
- .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
- .build_group = event.Group(BuildError!void).init(loop),
- .compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
-
- .meta_type = undefined,
- .void_type = undefined,
- .void_value = undefined,
- .bool_type = undefined,
- .true_value = undefined,
- .false_value = undefined,
- .noreturn_type = undefined,
- .noreturn_value = undefined,
- });
- try module.initTypes();
- return module;
- }
-
- fn initTypes(module: *Module) !void {
- module.meta_type = try module.a().create(Type.MetaType{
- .base = Type{
- .base = Value{
- .id = Value.Id.Type,
- .typeof = undefined,
- .ref_count = std.atomic.Int(usize).init(3), // 3 because it references itself twice
- },
- .id = builtin.TypeId.Type,
- },
- .value = undefined,
- });
- module.meta_type.value = &module.meta_type.base;
- module.meta_type.base.base.typeof = &module.meta_type.base;
- errdefer module.a().destroy(module.meta_type);
-
- module.void_type = try module.a().create(Type.Void{
- .base = Type{
- .base = Value{
- .id = Value.Id.Type,
- .typeof = &Type.MetaType.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- .id = builtin.TypeId.Void,
- },
- });
- errdefer module.a().destroy(module.void_type);
-
- module.noreturn_type = try module.a().create(Type.NoReturn{
- .base = Type{
- .base = Value{
- .id = Value.Id.Type,
- .typeof = &Type.MetaType.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- .id = builtin.TypeId.NoReturn,
- },
- });
- errdefer module.a().destroy(module.noreturn_type);
-
- module.bool_type = try module.a().create(Type.Bool{
- .base = Type{
- .base = Value{
- .id = Value.Id.Type,
- .typeof = &Type.MetaType.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- .id = builtin.TypeId.Bool,
- },
- });
- errdefer module.a().destroy(module.bool_type);
-
- module.void_value = try module.a().create(Value.Void{
- .base = Value{
- .id = Value.Id.Void,
- .typeof = &Type.Void.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- });
- errdefer module.a().destroy(module.void_value);
-
- module.true_value = try module.a().create(Value.Bool{
- .base = Value{
- .id = Value.Id.Bool,
- .typeof = &Type.Bool.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- .x = true,
- });
- errdefer module.a().destroy(module.true_value);
-
- module.false_value = try module.a().create(Value.Bool{
- .base = Value{
- .id = Value.Id.Bool,
- .typeof = &Type.Bool.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- .x = false,
- });
- errdefer module.a().destroy(module.false_value);
-
- module.noreturn_value = try module.a().create(Value.NoReturn{
- .base = Value{
- .id = Value.Id.NoReturn,
- .typeof = &Type.NoReturn.get(module).base,
- .ref_count = std.atomic.Int(usize).init(1),
- },
- });
- errdefer module.a().destroy(module.noreturn_value);
- }
-
- pub fn destroy(self: *Module) void {
- self.noreturn_value.base.deref(self);
- self.void_value.base.deref(self);
- self.false_value.base.deref(self);
- self.true_value.base.deref(self);
- self.noreturn_type.base.base.deref(self);
- self.void_type.base.base.deref(self);
- self.meta_type.base.base.deref(self);
-
- self.events.destroy();
- self.name.deinit();
-
- self.a().destroy(self);
- }
-
- pub fn build(self: *Module) !void {
- if (self.llvm_argv.len != 0) {
- var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
- [][]const u8{"zig (LLVM option parsing)"},
- self.llvm_argv,
- });
- defer c_compatible_args.deinit();
- // TODO this sets global state
- c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
- }
-
- _ = try async self.buildAsync();
- }
-
- async fn buildAsync(self: *Module) void {
- while (true) {
- // TODO directly awaiting async should guarantee memory allocation elision
- // TODO also async before suspending should guarantee memory allocation elision
- const build_result = await (async self.addRootSrc() catch unreachable);
-
- // this makes a handy error return trace and stack trace in debug mode
- if (std.debug.runtime_safety) {
- build_result catch unreachable;
- }
-
- const compile_errors = blk: {
- const held = await (async self.compile_errors.acquire() catch unreachable);
- defer held.release();
- break :blk held.value.toOwnedSlice();
- };
-
- if (build_result) |_| {
- if (compile_errors.len == 0) {
- await (async self.events.put(Event.Ok) catch unreachable);
- } else {
- await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
- }
- } else |err| {
- // if there's an error then the compile errors have dangling references
- self.a().free(compile_errors);
-
- await (async self.events.put(Event{ .Error = err }) catch unreachable);
- }
-
- // for now we stop after 1
- return;
- }
- }
-
- async fn addRootSrc(self: *Module) !void {
- const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
- // TODO async/await os.path.real
- const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
- try printError("unable to get real path '{}': {}", root_src_path, err);
- return err;
- };
- errdefer self.a().free(root_src_real_path);
-
- // TODO async/await readFileAlloc()
- const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
- try printError("unable to open '{}': {}", root_src_real_path, err);
- return err;
- };
- errdefer self.a().free(source_code);
-
- const parsed_file = try self.a().create(ParsedFile{
- .tree = undefined,
- .realpath = root_src_real_path,
- });
- errdefer self.a().destroy(parsed_file);
-
- parsed_file.tree = try std.zig.parse(self.a(), source_code);
- errdefer parsed_file.tree.deinit();
-
- const tree = &parsed_file.tree;
-
- // create empty struct for it
- const decls = try Scope.Decls.create(self, null);
- defer decls.base.deref(self);
-
- var decl_group = event.Group(BuildError!void).init(self.loop);
- errdefer decl_group.cancelAll();
-
- var it = tree.root_node.decls.iterator(0);
- while (it.next()) |decl_ptr| {
- const decl = decl_ptr.*;
- switch (decl.id) {
- ast.Node.Id.Comptime => @panic("TODO"),
- ast.Node.Id.VarDecl => @panic("TODO"),
- ast.Node.Id.FnProto => {
- const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
-
- const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
- try self.addCompileError(parsed_file, Span{
- .first = fn_proto.fn_token,
- .last = fn_proto.fn_token + 1,
- }, "missing function name");
- continue;
- };
-
- const fn_decl = try self.a().create(Decl.Fn{
- .base = Decl{
- .id = Decl.Id.Fn,
- .name = name,
- .visib = parseVisibToken(tree, fn_proto.visib_token),
- .resolution = event.Future(BuildError!void).init(self.loop),
- .resolution_in_progress = 0,
- .parsed_file = parsed_file,
- .parent_scope = &decls.base,
- },
- .value = Decl.Fn.Val{ .Unresolved = {} },
- .fn_proto = fn_proto,
- });
- errdefer self.a().destroy(fn_decl);
-
- try decl_group.call(addTopLevelDecl, self, &fn_decl.base);
- },
- ast.Node.Id.TestDecl => @panic("TODO"),
- else => unreachable,
- }
- }
- try await (async decl_group.wait() catch unreachable);
- try await (async self.build_group.wait() catch unreachable);
- }
-
- async fn addTopLevelDecl(self: *Module, decl: *Decl) !void {
- const is_export = decl.isExported(&decl.parsed_file.tree);
-
- if (is_export) {
- try self.build_group.call(verifyUniqueSymbol, self, decl);
- try self.build_group.call(resolveDecl, self, decl);
- }
- }
-
- fn addCompileError(self: *Module, parsed_file: *ParsedFile, span: Span, comptime fmt: []const u8, args: ...) !void {
- const text = try std.fmt.allocPrint(self.loop.allocator, fmt, args);
- errdefer self.loop.allocator.free(text);
-
- try self.build_group.call(addCompileErrorAsync, self, parsed_file, span, text);
- }
-
- async fn addCompileErrorAsync(
- self: *Module,
- parsed_file: *ParsedFile,
- span: Span,
- text: []u8,
- ) !void {
- const msg = try self.loop.allocator.create(errmsg.Msg{
- .path = parsed_file.realpath,
- .text = text,
- .span = span,
- .tree = &parsed_file.tree,
- });
- errdefer self.loop.allocator.destroy(msg);
-
- const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
- defer compile_errors.release();
-
- try compile_errors.value.append(msg);
- }
-
- async fn verifyUniqueSymbol(self: *Module, decl: *Decl) !void {
- const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
- defer exported_symbol_names.release();
-
- if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
- try self.addCompileError(
- decl.parsed_file,
- decl.getSpan(),
- "exported symbol collision: '{}'",
- decl.name,
- );
- // TODO add error note showing location of other symbol
- }
- }
-
- pub fn link(self: *Module, out_file: ?[]const u8) !void {
- warn("TODO link");
- return error.Todo;
- }
-
- pub fn addLinkLib(self: *Module, name: []const u8, provided_explicitly: bool) !*LinkLib {
- const is_libc = mem.eql(u8, name, "c");
-
- if (is_libc) {
- if (self.libc_link_lib) |libc_link_lib| {
- return libc_link_lib;
- }
- }
-
- for (self.link_libs_list.toSliceConst()) |existing_lib| {
- if (mem.eql(u8, name, existing_lib.name)) {
- return existing_lib;
- }
- }
-
- const link_lib = try self.a().create(LinkLib{
- .name = name,
- .path = null,
- .provided_explicitly = provided_explicitly,
- .symbols = ArrayList([]u8).init(self.a()),
- });
- try self.link_libs_list.append(link_lib);
- if (is_libc) {
- self.libc_link_lib = link_lib;
- }
- return link_lib;
- }
-
- fn a(self: Module) *mem.Allocator {
- return self.loop.allocator;
- }
-};
-
-fn printError(comptime format: []const u8, args: ...) !void {
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
- try out_stream.print(format, args);
-}
-
-fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
- if (optional_token_index) |token_index| {
- const token = tree.tokens.at(token_index);
- assert(token.id == Token.Id.Keyword_pub);
- return Visib.Pub;
- } else {
- return Visib.Private;
- }
-}
-
-/// This declaration has been blessed as going into the final code generation.
-pub async fn resolveDecl(module: *Module, decl: *Decl) !void {
- if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
- decl.resolution.data = await (async generateDecl(module, decl) catch unreachable);
- decl.resolution.resolve();
- return decl.resolution.data;
- } else {
- return (await (async decl.resolution.get() catch unreachable)).*;
- }
-}
-
-/// The function that actually does the generation.
-async fn generateDecl(module: *Module, decl: *Decl) !void {
- switch (decl.id) {
- Decl.Id.Var => @panic("TODO"),
- Decl.Id.Fn => {
- const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
- return await (async generateDeclFn(module, fn_decl) catch unreachable);
- },
- Decl.Id.CompTime => @panic("TODO"),
- }
-}
-
-async fn generateDeclFn(module: *Module, fn_decl: *Decl.Fn) !void {
- const body_node = fn_decl.fn_proto.body_node orelse @panic("TODO extern fn proto decl");
-
- const fndef_scope = try Scope.FnDef.create(module, fn_decl.base.parent_scope);
- defer fndef_scope.base.deref(module);
-
- // TODO actually look at the return type of the AST
- const return_type = &Type.Void.get(module).base;
- defer return_type.base.deref(module);
-
- const is_var_args = false;
- const params = ([*]Type.Fn.Param)(undefined)[0..0];
- const fn_type = try Type.Fn.create(module, return_type, params, is_var_args);
- defer fn_type.base.base.deref(module);
-
- var symbol_name = try std.Buffer.init(module.a(), fn_decl.base.name);
- errdefer symbol_name.deinit();
-
- const fn_val = try Value.Fn.create(module, fn_type, fndef_scope, symbol_name);
- defer fn_val.base.deref(module);
-
- fn_decl.value = Decl.Fn.Val{ .Ok = fn_val };
-
- const unanalyzed_code = (await (async ir.gen(
- module,
- body_node,
- &fndef_scope.base,
- Span.token(body_node.lastToken()),
- fn_decl.base.parsed_file,
- ) catch unreachable)) catch |err| switch (err) {
- // This poison value should not cause the errdefers to run. It simply means
- // that self.compile_errors is populated.
- // TODO https://github.com/ziglang/zig/issues/769
- error.SemanticAnalysisFailed => return {},
- else => return err,
- };
- defer unanalyzed_code.destroy(module.a());
-
- if (module.verbose_ir) {
- std.debug.warn("unanalyzed:\n");
- unanalyzed_code.dump();
- }
-
- const analyzed_code = (await (async ir.analyze(
- module,
- fn_decl.base.parsed_file,
- unanalyzed_code,
- null,
- ) catch unreachable)) catch |err| switch (err) {
- // This poison value should not cause the errdefers to run. It simply means
- // that self.compile_errors is populated.
- // TODO https://github.com/ziglang/zig/issues/769
- error.SemanticAnalysisFailed => return {},
- else => return err,
- };
- errdefer analyzed_code.destroy(module.a());
-
- if (module.verbose_ir) {
- std.debug.warn("analyzed:\n");
- analyzed_code.dump();
- }
-
- // Kick off rendering to LLVM module, but it doesn't block the fn decl
- // analysis from being complete.
- try module.build_group.call(codegen.renderToLlvm, module, fn_val, analyzed_code);
-}
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index 8f8d016a7c..6fd6456b12 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -1,7 +1,7 @@
const std = @import("std");
const Allocator = mem.Allocator;
const Decl = @import("decl.zig").Decl;
-const Module = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
const mem = std.mem;
const ast = std.zig.ast;
const Value = @import("value.zig").Value;
@@ -16,17 +16,17 @@ pub const Scope = struct {
base.ref_count += 1;
}
- pub fn deref(base: *Scope, module: *Module) void {
+ pub fn deref(base: *Scope, comp: *Compilation) void {
base.ref_count -= 1;
if (base.ref_count == 0) {
- if (base.parent) |parent| parent.deref(module);
+ if (base.parent) |parent| parent.deref(comp);
switch (base.id) {
Id.Decls => @fieldParentPtr(Decls, "base", base).destroy(),
- Id.Block => @fieldParentPtr(Block, "base", base).destroy(module),
- Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(module),
- Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(module),
- Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(module),
- Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(module),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
+ Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(comp),
+ Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(comp),
+ Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
+ Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
}
}
}
@@ -61,8 +61,8 @@ pub const Scope = struct {
table: Decl.Table,
/// Creates a Decls scope with 1 reference
- pub fn create(module: *Module, parent: ?*Scope) !*Decls {
- const self = try module.a().create(Decls{
+ pub fn create(comp: *Compilation, parent: ?*Scope) !*Decls {
+ const self = try comp.a().create(Decls{
.base = Scope{
.id = Id.Decls,
.parent = parent,
@@ -70,9 +70,9 @@ pub const Scope = struct {
},
.table = undefined,
});
- errdefer module.a().destroy(self);
+ errdefer comp.a().destroy(self);
- self.table = Decl.Table.init(module.a());
+ self.table = Decl.Table.init(comp.a());
errdefer self.table.deinit();
if (parent) |p| p.ref();
@@ -94,8 +94,8 @@ pub const Scope = struct {
is_comptime: *ir.Instruction,
/// Creates a Block scope with 1 reference
- pub fn create(module: *Module, parent: ?*Scope) !*Block {
- const self = try module.a().create(Block{
+ pub fn create(comp: *Compilation, parent: ?*Scope) !*Block {
+ const self = try comp.a().create(Block{
.base = Scope{
.id = Id.Block,
.parent = parent,
@@ -106,14 +106,14 @@ pub const Scope = struct {
.end_block = undefined,
.is_comptime = undefined,
});
- errdefer module.a().destroy(self);
+ errdefer comp.a().destroy(self);
if (parent) |p| p.ref();
return self;
}
- pub fn destroy(self: *Block, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Block, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -125,8 +125,8 @@ pub const Scope = struct {
/// Creates a FnDef scope with 1 reference
/// Must set the fn_val later
- pub fn create(module: *Module, parent: ?*Scope) !*FnDef {
- const self = try module.a().create(FnDef{
+ pub fn create(comp: *Compilation, parent: ?*Scope) !*FnDef {
+ const self = try comp.a().create(FnDef{
.base = Scope{
.id = Id.FnDef,
.parent = parent,
@@ -140,8 +140,8 @@ pub const Scope = struct {
return self;
}
- pub fn destroy(self: *FnDef, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *FnDef, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -149,8 +149,8 @@ pub const Scope = struct {
base: Scope,
/// Creates a CompTime scope with 1 reference
- pub fn create(module: *Module, parent: ?*Scope) !*CompTime {
- const self = try module.a().create(CompTime{
+ pub fn create(comp: *Compilation, parent: ?*Scope) !*CompTime {
+ const self = try comp.a().create(CompTime{
.base = Scope{
.id = Id.CompTime,
.parent = parent,
@@ -162,8 +162,8 @@ pub const Scope = struct {
return self;
}
- pub fn destroy(self: *CompTime, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *CompTime, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -179,12 +179,12 @@ pub const Scope = struct {
/// Creates a Defer scope with 1 reference
pub fn create(
- module: *Module,
+ comp: *Compilation,
parent: ?*Scope,
kind: Kind,
defer_expr_scope: *DeferExpr,
) !*Defer {
- const self = try module.a().create(Defer{
+ const self = try comp.a().create(Defer{
.base = Scope{
.id = Id.Defer,
.parent = parent,
@@ -193,7 +193,7 @@ pub const Scope = struct {
.defer_expr_scope = defer_expr_scope,
.kind = kind,
});
- errdefer module.a().destroy(self);
+ errdefer comp.a().destroy(self);
defer_expr_scope.base.ref();
@@ -201,9 +201,9 @@ pub const Scope = struct {
return self;
}
- pub fn destroy(self: *Defer, module: *Module) void {
- self.defer_expr_scope.base.deref(module);
- module.a().destroy(self);
+ pub fn destroy(self: *Defer, comp: *Compilation) void {
+ self.defer_expr_scope.base.deref(comp);
+ comp.a().destroy(self);
}
};
@@ -212,8 +212,8 @@ pub const Scope = struct {
expr_node: *ast.Node,
/// Creates a DeferExpr scope with 1 reference
- pub fn create(module: *Module, parent: ?*Scope, expr_node: *ast.Node) !*DeferExpr {
- const self = try module.a().create(DeferExpr{
+ pub fn create(comp: *Compilation, parent: ?*Scope, expr_node: *ast.Node) !*DeferExpr {
+ const self = try comp.a().create(DeferExpr{
.base = Scope{
.id = Id.DeferExpr,
.parent = parent,
@@ -221,14 +221,14 @@ pub const Scope = struct {
},
.expr_node = expr_node,
});
- errdefer module.a().destroy(self);
+ errdefer comp.a().destroy(self);
if (parent) |p| p.ref();
return self;
}
- pub fn destroy(self: *DeferExpr, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *DeferExpr, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
};
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index e609eb2791..3edb267ca9 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -2,11 +2,11 @@ const std = @import("std");
const mem = std.mem;
const builtin = @import("builtin");
const Target = @import("target.zig").Target;
-const Module = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
const introspect = @import("introspect.zig");
const assertOrPanic = std.debug.assertOrPanic;
const errmsg = @import("errmsg.zig");
-const EventLoopLocal = @import("module.zig").EventLoopLocal;
+const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
test "compile errors" {
var ctx: TestContext = undefined;
@@ -100,42 +100,42 @@ pub const TestContext = struct {
// TODO async I/O
try std.io.writeFile(allocator, file1_path, source);
- var module = try Module.create(
+ var comp = try Compilation.create(
&self.event_loop_local,
"test",
file1_path,
Target.Native,
- Module.Kind.Obj,
+ Compilation.Kind.Obj,
builtin.Mode.Debug,
self.zig_lib_dir,
self.zig_cache_dir,
);
- errdefer module.destroy();
+ errdefer comp.destroy();
- try module.build();
+ try comp.build();
- try self.group.call(getModuleEvent, module, source, path, line, column, msg);
+ try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
}
async fn getModuleEvent(
- module: *Module,
+ comp: *Compilation,
source: []const u8,
path: []const u8,
line: usize,
column: usize,
text: []const u8,
) !void {
- defer module.destroy();
- const build_event = await (async module.events.get() catch unreachable);
+ defer comp.destroy();
+ const build_event = await (async comp.events.get() catch unreachable);
switch (build_event) {
- Module.Event.Ok => {
+ Compilation.Event.Ok => {
@panic("build incorrectly succeeded");
},
- Module.Event.Error => |err| {
+ Compilation.Event.Error => |err| {
@panic("build incorrectly failed");
},
- Module.Event.Fail => |msgs| {
+ Compilation.Event.Fail => |msgs| {
assertOrPanic(msgs.len != 0);
for (msgs) |msg| {
if (mem.endsWith(u8, msg.path, path) and mem.eql(u8, msg.text, text)) {
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index e4c31018a3..8349047749 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -1,10 +1,10 @@
const std = @import("std");
const builtin = @import("builtin");
const Scope = @import("scope.zig").Scope;
-const Module = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
const Value = @import("value.zig").Value;
const llvm = @import("llvm.zig");
-const CompilationUnit = @import("codegen.zig").CompilationUnit;
+const ObjectFile = @import("codegen.zig").ObjectFile;
pub const Type = struct {
base: Value,
@@ -12,63 +12,63 @@ pub const Type = struct {
pub const Id = builtin.TypeId;
- pub fn destroy(base: *Type, module: *Module) void {
+ pub fn destroy(base: *Type, comp: *Compilation) void {
switch (base.id) {
- Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(module),
- Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(module),
- Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(module),
- Id.Void => @fieldParentPtr(Void, "base", base).destroy(module),
- Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(module),
- Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module),
- Id.Int => @fieldParentPtr(Int, "base", base).destroy(module),
- Id.Float => @fieldParentPtr(Float, "base", base).destroy(module),
- Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(module),
- Id.Array => @fieldParentPtr(Array, "base", base).destroy(module),
- Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(module),
- Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(module),
- Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(module),
- Id.Null => @fieldParentPtr(Null, "base", base).destroy(module),
- Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(module),
- Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(module),
- Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(module),
- Id.Enum => @fieldParentPtr(Enum, "base", base).destroy(module),
- Id.Union => @fieldParentPtr(Union, "base", base).destroy(module),
- Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(module),
- Id.Block => @fieldParentPtr(Block, "base", base).destroy(module),
- Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(module),
- Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(module),
- Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(module),
- Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(module),
+ Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(comp),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
+ Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(comp),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
+ Id.Int => @fieldParentPtr(Int, "base", base).destroy(comp),
+ Id.Float => @fieldParentPtr(Float, "base", base).destroy(comp),
+ Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(comp),
+ Id.Array => @fieldParentPtr(Array, "base", base).destroy(comp),
+ Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(comp),
+ Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(comp),
+ Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(comp),
+ Id.Null => @fieldParentPtr(Null, "base", base).destroy(comp),
+ Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(comp),
+ Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(comp),
+ Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(comp),
+ Id.Enum => @fieldParentPtr(Enum, "base", base).destroy(comp),
+ Id.Union => @fieldParentPtr(Union, "base", base).destroy(comp),
+ Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(comp),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
+ Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(comp),
+ Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(comp),
+ Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(comp),
+ Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(comp),
}
}
- pub fn getLlvmType(base: *Type, cunit: *CompilationUnit) (error{OutOfMemory}!llvm.TypeRef) {
+ pub fn getLlvmType(base: *Type, ofile: *ObjectFile) (error{OutOfMemory}!llvm.TypeRef) {
switch (base.id) {
- Id.Struct => return @fieldParentPtr(Struct, "base", base).getLlvmType(cunit),
- Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmType(cunit),
+ Id.Struct => return @fieldParentPtr(Struct, "base", base).getLlvmType(ofile),
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmType(ofile),
Id.Type => unreachable,
Id.Void => unreachable,
- Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmType(cunit),
+ Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmType(ofile),
Id.NoReturn => unreachable,
- Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmType(cunit),
- Id.Float => return @fieldParentPtr(Float, "base", base).getLlvmType(cunit),
- Id.Pointer => return @fieldParentPtr(Pointer, "base", base).getLlvmType(cunit),
- Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmType(cunit),
+ Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmType(ofile),
+ Id.Float => return @fieldParentPtr(Float, "base", base).getLlvmType(ofile),
+ Id.Pointer => return @fieldParentPtr(Pointer, "base", base).getLlvmType(ofile),
+ Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmType(ofile),
Id.ComptimeFloat => unreachable,
Id.ComptimeInt => unreachable,
Id.Undefined => unreachable,
Id.Null => unreachable,
- Id.Optional => return @fieldParentPtr(Optional, "base", base).getLlvmType(cunit),
- Id.ErrorUnion => return @fieldParentPtr(ErrorUnion, "base", base).getLlvmType(cunit),
- Id.ErrorSet => return @fieldParentPtr(ErrorSet, "base", base).getLlvmType(cunit),
- Id.Enum => return @fieldParentPtr(Enum, "base", base).getLlvmType(cunit),
- Id.Union => return @fieldParentPtr(Union, "base", base).getLlvmType(cunit),
+ Id.Optional => return @fieldParentPtr(Optional, "base", base).getLlvmType(ofile),
+ Id.ErrorUnion => return @fieldParentPtr(ErrorUnion, "base", base).getLlvmType(ofile),
+ Id.ErrorSet => return @fieldParentPtr(ErrorSet, "base", base).getLlvmType(ofile),
+ Id.Enum => return @fieldParentPtr(Enum, "base", base).getLlvmType(ofile),
+ Id.Union => return @fieldParentPtr(Union, "base", base).getLlvmType(ofile),
Id.Namespace => unreachable,
Id.Block => unreachable,
- Id.BoundFn => return @fieldParentPtr(BoundFn, "base", base).getLlvmType(cunit),
+ Id.BoundFn => return @fieldParentPtr(BoundFn, "base", base).getLlvmType(ofile),
Id.ArgTuple => unreachable,
- Id.Opaque => return @fieldParentPtr(Opaque, "base", base).getLlvmType(cunit),
- Id.Promise => return @fieldParentPtr(Promise, "base", base).getLlvmType(cunit),
+ Id.Opaque => return @fieldParentPtr(Opaque, "base", base).getLlvmType(ofile),
+ Id.Promise => return @fieldParentPtr(Promise, "base", base).getLlvmType(ofile),
}
}
@@ -76,7 +76,7 @@ pub const Type = struct {
std.debug.warn("{}", @tagName(base.id));
}
- pub fn getAbiAlignment(base: *Type, module: *Module) u32 {
+ pub fn getAbiAlignment(base: *Type, comp: *Compilation) u32 {
@panic("TODO getAbiAlignment");
}
@@ -84,11 +84,11 @@ pub const Type = struct {
base: Type,
decls: *Scope.Decls,
- pub fn destroy(self: *Struct, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Struct, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Struct, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Struct, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -104,12 +104,12 @@ pub const Type = struct {
typeof: *Type,
};
- pub fn create(module: *Module, return_type: *Type, params: []Param, is_var_args: bool) !*Fn {
- const result = try module.a().create(Fn{
+ pub fn create(comp: *Compilation, return_type: *Type, params: []Param, is_var_args: bool) !*Fn {
+ const result = try comp.a().create(Fn{
.base = Type{
.base = Value{
.id = Value.Id.Type,
- .typeof = &MetaType.get(module).base,
+ .typeof = &MetaType.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Fn,
@@ -118,7 +118,7 @@ pub const Type = struct {
.params = params,
.is_var_args = is_var_args,
});
- errdefer module.a().destroy(result);
+ errdefer comp.a().destroy(result);
result.return_type.base.ref();
for (result.params) |param| {
@@ -127,23 +127,23 @@ pub const Type = struct {
return result;
}
- pub fn destroy(self: *Fn, module: *Module) void {
- self.return_type.base.deref(module);
+ pub fn destroy(self: *Fn, comp: *Compilation) void {
+ self.return_type.base.deref(comp);
for (self.params) |param| {
- param.typeof.base.deref(module);
+ param.typeof.base.deref(comp);
}
- module.a().destroy(self);
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Fn, cunit: *CompilationUnit) !llvm.TypeRef {
+ pub fn getLlvmType(self: *Fn, ofile: *ObjectFile) !llvm.TypeRef {
const llvm_return_type = switch (self.return_type.id) {
- Type.Id.Void => llvm.VoidTypeInContext(cunit.context) orelse return error.OutOfMemory,
- else => try self.return_type.getLlvmType(cunit),
+ Type.Id.Void => llvm.VoidTypeInContext(ofile.context) orelse return error.OutOfMemory,
+ else => try self.return_type.getLlvmType(ofile),
};
- const llvm_param_types = try cunit.a().alloc(llvm.TypeRef, self.params.len);
- defer cunit.a().free(llvm_param_types);
+ const llvm_param_types = try ofile.a().alloc(llvm.TypeRef, self.params.len);
+ defer ofile.a().free(llvm_param_types);
for (llvm_param_types) |*llvm_param_type, i| {
- llvm_param_type.* = try self.params[i].typeof.getLlvmType(cunit);
+ llvm_param_type.* = try self.params[i].typeof.getLlvmType(ofile);
}
return llvm.FunctionType(
@@ -160,13 +160,13 @@ pub const Type = struct {
value: *Type,
/// Adds 1 reference to the resulting type
- pub fn get(module: *Module) *MetaType {
- module.meta_type.base.base.ref();
- return module.meta_type;
+ pub fn get(comp: *Compilation) *MetaType {
+ comp.meta_type.base.base.ref();
+ return comp.meta_type;
}
- pub fn destroy(self: *MetaType, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *MetaType, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -174,13 +174,13 @@ pub const Type = struct {
base: Type,
/// Adds 1 reference to the resulting type
- pub fn get(module: *Module) *Void {
- module.void_type.base.base.ref();
- return module.void_type;
+ pub fn get(comp: *Compilation) *Void {
+ comp.void_type.base.base.ref();
+ return comp.void_type;
}
- pub fn destroy(self: *Void, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Void, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -188,16 +188,16 @@ pub const Type = struct {
base: Type,
/// Adds 1 reference to the resulting type
- pub fn get(module: *Module) *Bool {
- module.bool_type.base.base.ref();
- return module.bool_type;
+ pub fn get(comp: *Compilation) *Bool {
+ comp.bool_type.base.base.ref();
+ return comp.bool_type;
}
- pub fn destroy(self: *Bool, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Bool, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Bool, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Bool, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -206,24 +206,24 @@ pub const Type = struct {
base: Type,
/// Adds 1 reference to the resulting type
- pub fn get(module: *Module) *NoReturn {
- module.noreturn_type.base.base.ref();
- return module.noreturn_type;
+ pub fn get(comp: *Compilation) *NoReturn {
+ comp.noreturn_type.base.base.ref();
+ return comp.noreturn_type;
}
- pub fn destroy(self: *NoReturn, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *NoReturn, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const Int = struct {
base: Type,
- pub fn destroy(self: *Int, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Int, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Int, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Int, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -231,11 +231,11 @@ pub const Type = struct {
pub const Float = struct {
base: Type,
- pub fn destroy(self: *Float, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Float, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Float, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Float, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -256,12 +256,12 @@ pub const Type = struct {
};
pub const Size = builtin.TypeInfo.Pointer.Size;
- pub fn destroy(self: *Pointer, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Pointer, comp: *Compilation) void {
+ comp.a().destroy(self);
}
pub fn get(
- module: *Module,
+ comp: *Compilation,
elem_type: *Type,
mut: Mut,
vol: Vol,
@@ -271,7 +271,7 @@ pub const Type = struct {
@panic("TODO get pointer");
}
- pub fn getLlvmType(self: *Pointer, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Pointer, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -279,11 +279,11 @@ pub const Type = struct {
pub const Array = struct {
base: Type,
- pub fn destroy(self: *Array, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Array, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Array, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Array, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -291,43 +291,43 @@ pub const Type = struct {
pub const ComptimeFloat = struct {
base: Type,
- pub fn destroy(self: *ComptimeFloat, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *ComptimeFloat, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const ComptimeInt = struct {
base: Type,
- pub fn destroy(self: *ComptimeInt, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *ComptimeInt, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const Undefined = struct {
base: Type,
- pub fn destroy(self: *Undefined, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Undefined, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const Null = struct {
base: Type,
- pub fn destroy(self: *Null, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Null, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const Optional = struct {
base: Type,
- pub fn destroy(self: *Optional, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Optional, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Optional, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Optional, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -335,11 +335,11 @@ pub const Type = struct {
pub const ErrorUnion = struct {
base: Type,
- pub fn destroy(self: *ErrorUnion, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *ErrorUnion, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *ErrorUnion, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *ErrorUnion, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -347,11 +347,11 @@ pub const Type = struct {
pub const ErrorSet = struct {
base: Type,
- pub fn destroy(self: *ErrorSet, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *ErrorSet, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *ErrorSet, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *ErrorSet, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -359,11 +359,11 @@ pub const Type = struct {
pub const Enum = struct {
base: Type,
- pub fn destroy(self: *Enum, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Enum, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Enum, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Enum, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -371,11 +371,11 @@ pub const Type = struct {
pub const Union = struct {
base: Type,
- pub fn destroy(self: *Union, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Union, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Union, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Union, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -383,27 +383,27 @@ pub const Type = struct {
pub const Namespace = struct {
base: Type,
- pub fn destroy(self: *Namespace, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Namespace, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const Block = struct {
base: Type,
- pub fn destroy(self: *Block, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Block, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const BoundFn = struct {
base: Type,
- pub fn destroy(self: *BoundFn, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *BoundFn, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *BoundFn, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *BoundFn, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -411,19 +411,19 @@ pub const Type = struct {
pub const ArgTuple = struct {
base: Type,
- pub fn destroy(self: *ArgTuple, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *ArgTuple, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const Opaque = struct {
base: Type,
- pub fn destroy(self: *Opaque, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Opaque, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Opaque, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Opaque, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
@@ -431,11 +431,11 @@ pub const Type = struct {
pub const Promise = struct {
base: Type,
- pub fn destroy(self: *Promise, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Promise, comp: *Compilation) void {
+ comp.a().destroy(self);
}
- pub fn getLlvmType(self: *Promise, cunit: *CompilationUnit) llvm.TypeRef {
+ pub fn getLlvmType(self: *Promise, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index 779e5c2e45..8c047b1513 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -1,7 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const Scope = @import("scope.zig").Scope;
-const Module = @import("module.zig").Module;
+const Compilation = @import("compilation.zig").Compilation;
/// Values are ref-counted, heap-allocated, and copy-on-write
/// If there is only 1 ref then write need not copy
@@ -16,16 +16,16 @@ pub const Value = struct {
}
/// Thread-safe
- pub fn deref(base: *Value, module: *Module) void {
+ pub fn deref(base: *Value, comp: *Compilation) void {
if (base.ref_count.decr() == 1) {
- base.typeof.base.deref(module);
+ base.typeof.base.deref(comp);
switch (base.id) {
- Id.Type => @fieldParentPtr(Type, "base", base).destroy(module),
- Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(module),
- Id.Void => @fieldParentPtr(Void, "base", base).destroy(module),
- Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(module),
- Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(module),
- Id.Ptr => @fieldParentPtr(Ptr, "base", base).destroy(module),
+ Id.Type => @fieldParentPtr(Type, "base", base).destroy(comp),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
+ Id.Ptr => @fieldParentPtr(Ptr, "base", base).destroy(comp),
}
}
}
@@ -68,8 +68,8 @@ pub const Value = struct {
/// Creates a Fn value with 1 ref
/// Takes ownership of symbol_name
- pub fn create(module: *Module, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: std.Buffer) !*Fn {
- const self = try module.a().create(Fn{
+ pub fn create(comp: *Compilation, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: std.Buffer) !*Fn {
+ const self = try comp.a().create(Fn{
.base = Value{
.id = Value.Id.Fn,
.typeof = &fn_type.base,
@@ -86,23 +86,23 @@ pub const Value = struct {
return self;
}
- pub fn destroy(self: *Fn, module: *Module) void {
- self.fndef_scope.base.deref(module);
+ pub fn destroy(self: *Fn, comp: *Compilation) void {
+ self.fndef_scope.base.deref(comp);
self.symbol_name.deinit();
- module.a().destroy(self);
+ comp.a().destroy(self);
}
};
pub const Void = struct {
base: Value,
- pub fn get(module: *Module) *Void {
- module.void_value.base.ref();
- return module.void_value;
+ pub fn get(comp: *Compilation) *Void {
+ comp.void_value.base.ref();
+ return comp.void_value;
}
- pub fn destroy(self: *Void, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Void, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -110,31 +110,31 @@ pub const Value = struct {
base: Value,
x: bool,
- pub fn get(module: *Module, x: bool) *Bool {
+ pub fn get(comp: *Compilation, x: bool) *Bool {
if (x) {
- module.true_value.base.ref();
- return module.true_value;
+ comp.true_value.base.ref();
+ return comp.true_value;
} else {
- module.false_value.base.ref();
- return module.false_value;
+ comp.false_value.base.ref();
+ return comp.false_value;
}
}
- pub fn destroy(self: *Bool, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Bool, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
pub const NoReturn = struct {
base: Value,
- pub fn get(module: *Module) *NoReturn {
- module.noreturn_value.base.ref();
- return module.noreturn_value;
+ pub fn get(comp: *Compilation) *NoReturn {
+ comp.noreturn_value.base.ref();
+ return comp.noreturn_value;
}
- pub fn destroy(self: *NoReturn, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *NoReturn, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
@@ -147,8 +147,8 @@ pub const Value = struct {
RunTime,
};
- pub fn destroy(self: *Ptr, module: *Module) void {
- module.a().destroy(self);
+ pub fn destroy(self: *Ptr, comp: *Compilation) void {
+ comp.a().destroy(self);
}
};
};
--
cgit v1.2.3
|